Code example #1
    def rgb_processing(self, rgb_img, center, scale, rot, flip, pn):
        """Process rgb image and do augmentation."""

        bDebug = False

        if bDebug:  #Debug visualize
            from renderer import viewer2D
            viewer2D.ImShow(rgb_img.astype(np.uint8), waitTime=0)
        rgb_img = crop(rgb_img,
                       center,
                       scale, [constants.IMG_RES, constants.IMG_RES],
                       rot=rot)
        # rgb_img, boxScale_o2n, bboxTopLeft  = crop_bboxInfo(rgb_img, center, scale,
        #               [constants.IMG_RES, constants.IMG_RES], rot=rot)

        # img, boxScale_o2n, bboxTopLeft = crop_bboxInfo(img, center, scale, (input_res, input_res))

        if rgb_img is None:
            return None

        # flip the image
        if flip:
            rgb_img = flip_img(rgb_img)
        # in the rgb image we add pixel noise in a channel-wise manner
        rgb_img[:, :, 0] = np.minimum(255.0, np.maximum(0.0, rgb_img[:, :, 0] * pn[0]))
        rgb_img[:, :, 1] = np.minimum(255.0, np.maximum(0.0, rgb_img[:, :, 1] * pn[1]))
        rgb_img[:, :, 2] = np.minimum(255.0, np.maximum(0.0, rgb_img[:, :, 2] * pn[2]))

        if bDebug:  #Debug visualize
            print("center{}, scale{}, rot{}, flip{}, pn{}".format(
                center, scale, rot, flip, pn))
            from renderer import viewer2D
            viewer2D.ImShow(rgb_img, waitTime=0, name='cropped')

        # (3,224,224),float,[0,1]
        rgb_img = np.transpose(rgb_img.astype('float32'), (2, 0, 1)) / 255.0
        return rgb_img
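A minimal usage sketch (not from the repository) of how rgb_processing might be called from a data loader; the bbox parameters and noise factors below are hypothetical, and `dataset` stands for an instance of the class that defines this method.

# Hypothetical usage sketch for rgb_processing; the values and the `dataset` instance are assumptions
import numpy as np

rgb_img = np.zeros((480, 640, 3), dtype=np.float32)   # dummy H x W x 3 image
center = np.array([320.0, 240.0])                     # bbox center in pixels
scale = 1.2                                           # bbox size / 200, as used elsewhere in eft
pn = np.random.uniform(0.9, 1.1, 3)                   # per-channel pixel-noise factors

img = dataset.rgb_processing(rgb_img, center, scale, rot=0, flip=0, pn=pn)
# img is a (3, IMG_RES, IMG_RES) float32 array in [0, 1], ready for torch.from_numpy(img)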
Code example #2
def denormImg(image_tensor):
    image_np = de_normalize_img(image_tensor).cpu().numpy()
    image_np = np.transpose(image_np, (1, 2, 0)) * 255.0
    image_np = image_np[:, :, [2, 1, 0]]

    #Denormalize image
    image_np = np.ascontiguousarray(image_np, dtype=np.uint8)
    # originalImgVis = curImgVis.copy()
    viewer2D.ImShow(image_np, name='denormImg')

    return image_np
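denormImg maps a normalized CHW tensor back to a uint8 HWC BGR image for OpenCV display. Below is a hedged sketch of the forward normalization it presumably inverts, assuming de_normalize_img reverses the standard ImageNet mean/std; the dummy tensor is an assumption.

# Hedged sketch of the normalization denormImg is assumed to invert (ImageNet statistics assumed)
import torch
from torchvision import transforms

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
img_chw = torch.rand(3, 224, 224)      # RGB image in [0, 1], CHW layout
image_tensor = normalize(img_chw)      # what the network consumes
# image_np = denormImg(image_tensor)   # back to uint8 BGR HWC, shown via viewer2D.ImShow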
Code example #3
File: visualizer.py Project: zhly0/eft
    def visualize_gui_naive(self,
                            meshList,
                            skelList,
                            bboxXYWH_list=[],
                            img_original=None):
        """
            args:
                meshList: list of {'ver': pred_vertices, 'f': smpl.faces}
                skelList: list of [JointNum*3, 1]       (where 1 means num. of frames in glviewer)
                bboxXYWH_list: list of [x,y,w,h]
        """
        if img_original is not None:
            if len(bboxXYWH_list) > 0:
                for bbr in bboxXYWH_list:
                    viewer2D.Vis_Bbox(img_original, bbr)
            viewer2D.ImShow(img_original)
            glViewer.setWindowSize(img_original.shape[1],
                                   img_original.shape[0])
        # glViewer.setRenderOutputSize(inputImg.shape[1],inputImg.shape[0])
        glViewer.setBackgroundTexture(img_original)
        glViewer.SetOrthoCamera(True)
        glViewer.setMeshData(
            meshList, bComputeNormal=True
        )  # meshes = {'ver': pred_vertices, 'f': smplWrapper.f}
        glViewer.setSkeleton(skelList)

        if True:  #Save to File
            if True:  #Cam view rendering
                # glViewer.setSaveFolderName(overlaidImageFolder)
                glViewer.setNearPlane(50)
                if img_original is not None:
                    glViewer.setWindowSize(img_original.shape[1],
                                           img_original.shape[0])
                # glViewer.show_SMPL(bSaveToFile = True, bResetSaveImgCnt = False, countImg = False, mode = 'camera')
                glViewer.show(1)

            if False:  #Side view rendering
                # glViewer.setSaveFolderName(sideImageFolder)
                glViewer.setNearPlane(50)
                if img_original is not None:
                    glViewer.setWindowSize(img_original.shape[1],
                                           img_original.shape[0])
                glViewer.show_SMPL(bSaveToFile=True,
                                   bResetSaveImgCnt=False,
                                   countImg=True,
                                   zoom=1108,
                                   mode='youtube')
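A hedged call sketch for visualize_gui_naive; the argument shapes follow the docstring above, while `visualizer`, pred_vertices, smpl.faces, and raw_img are assumptions about the caller's scope.

# Hypothetical call sketch; `visualizer`, pred_vertices, smpl.faces, and raw_img are assumed to exist
import numpy as np

pred_joints_3d = np.zeros((24, 3))                    # dummy skeleton, JointNum x 3
meshes = {'ver': pred_vertices, 'f': smpl.faces}      # as described in the docstring
skel = pred_joints_3d.reshape(-1, 1)                  # (JointNum*3, 1), one frame
visualizer.visualize_gui_naive([meshes], [skel],
                               bboxXYWH_list=[[10, 20, 200, 400]],
                               img_original=raw_img)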
Code example #4
def loadFoot():
    cocoImgDir = '/run/media/hjoo/disk/data/coco2017/train2017'
    footAnnotFile = '/run/media/hjoo/disk/data/cmu_foot/person_keypoints_train2017_foot_v1.json'
    with open(footAnnotFile, 'rb') as f:
        json_data = json.load(f)

    imgs = {}
    for img in json_data['images']:
        imgs[img['id']] = img

    footAnnot = {}
    for annot in json_data['annotations']:

        keypoints = annot['keypoints']

        # keypoints = np.reshape(keypoints, (17,3))     #original coco
        keypoints = np.reshape(keypoints, (23, 3))  #17 original COCO keypoints + 6 foot keypoints
        keypoints[keypoints[:, 2] > 0, 2] = 1

        footCnt = sum(keypoints[-6:, -1])  #Last 6 keypoints are for foot
        if footCnt < 6:
            continue

        image_id = annot['image_id']
        annot_id = annot['id']
        imageName = imgs[image_id]['file_name']

        footAnnot[annot_id] = keypoints[-6:, ]
        continue

        # NOTE: the code below never runs because of the continue above; kept as debug visualization

        if True:
            imgPath = os.path.join(cocoImgDir, imageName)
            raw_img = cv2.imread(imgPath)
            raw_img = viewer2D.Vis_Skeleton_2D_foot(keypoints[-6:, :-1],
                                                    keypoints[-6:, -1],
                                                    image=raw_img)
            viewer2D.ImShow(raw_img, waitTime=0)

        # 0: Left big toe.
        # 1: Left small toe.
        # 2: Left heel.
        # 3: Right big toe.
        # 4: Right small toe.
        # 5: Right heel.
        continue

    return footAnnot
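The returned dictionary maps a COCO annotation id to a (6, 3) array of foot keypoints (big toe, small toe, heel for each side, with a visibility column). A hedged consumption sketch; the dataset paths inside loadFoot are machine-specific.

# Hypothetical sketch of consuming loadFoot's output
footAnnot = loadFoot()
for annot_id, foot_kp in footAnnot.items():
    # foot_kp is (6, 3): [left big toe, left small toe, left heel,
    #                     right big toe, right small toe, right heel]; last column is visibility
    print(annot_id, foot_kp[:, :2])   # 2D locations of the 6 foot keypoints
    break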
Code example #5
File: eval.py Project: facebookresearch/eft
def run_evaluation(model, dataset_name, dataset, result_file,
                   batch_size=32, img_res=224, 
                   num_workers=32, shuffle=False, log_freq=50, bVerbose= True):
    """Run evaluation on the datasets and metrics we report in the paper. """

    print(dataset_name)


    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # # Transfer model to the GPU
    # model.to(device)

    # Load SMPL model
    global g_smpl_neutral, g_smpl_male, g_smpl_female
    if g_smpl_neutral is None:
        g_smpl_neutral = SMPL(config.SMPL_MODEL_DIR,
                            create_transl=False).to(device)
        
        # g_smpl_neutral = SMPLX(config.SMPL_MODEL_DIR,
        #                     create_transl=False).to(device)
                                     
        g_smpl_male = SMPL(config.SMPL_MODEL_DIR,
                        gender='male',
                        create_transl=False).to(device)
        g_smpl_female = SMPL(config.SMPL_MODEL_DIR,
                        gender='female',
                        create_transl=False).to(device)

        smpl_neutral = g_smpl_neutral
        smpl_male = g_smpl_male
        smpl_female = g_smpl_female
    else:
        smpl_neutral = g_smpl_neutral
        smpl_male = g_smpl_male
        smpl_female = g_smpl_female

    # renderer = PartRenderer()    
    # Regressor for H36m joints
    J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()
    
    save_results = result_file is not None
    # Disable shuffling if you want to save the results
    if save_results:
        shuffle=False
    # Create dataloader for the dataset
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
    
    # Pose metrics
    # MPJPE and Reconstruction error for the non-parametric and parametric shapes
    # mpjpe = np.zeros(len(dataset))
    # recon_err = np.zeros(len(dataset))
    quant_mpjpe = {}#np.zeros(len(dataset))
    quant_recon_err = {}#np.zeros(len(dataset))
    mpjpe = np.zeros(len(dataset))
    recon_err = np.zeros(len(dataset))

    mpjpe_smpl = np.zeros(len(dataset))
    recon_err_smpl = np.zeros(len(dataset))

    # Shape metrics
    # Mean per-vertex error
    shape_err = np.zeros(len(dataset))
    shape_err_smpl = np.zeros(len(dataset))

    # Mask and part metrics
    # Accuracy
    accuracy = 0.
    parts_accuracy = 0.
    # True positive, false positive and false negative
    tp = np.zeros((2,1))
    fp = np.zeros((2,1))
    fn = np.zeros((2,1))
    parts_tp = np.zeros((7,1))
    parts_fp = np.zeros((7,1))
    parts_fn = np.zeros((7,1))
    # Pixel count accumulators
    pixel_count = 0
    parts_pixel_count = 0

    # Store SMPL parameters
    smpl_pose = np.zeros((len(dataset), 72))
    smpl_betas = np.zeros((len(dataset), 10))
    smpl_camera = np.zeros((len(dataset), 3))
    pred_joints = np.zeros((len(dataset), 17, 3))

    eval_pose = False
    eval_masks = False
    eval_parts = False
    # Choose appropriate evaluation for each dataset
    if dataset_name == 'h36m-p1' or dataset_name == 'h36m-p2'  or dataset_name == 'lspet-test' \
                        or dataset_name == '3dpw' or dataset_name == 'coco2014-val-3d-amt'  or dataset_name == 'ochuman-test' \
                        or dataset_name == '3dpw-vibe'  or dataset_name == '3dpw-crop' or dataset_name == '3dpw-headcrop' or dataset_name == 'mpi-inf-3dhp-test':
        eval_pose = True
    elif dataset_name == 'lsp':
        eval_masks = True
        eval_parts = True
        annot_path = config.DATASET_FOLDERS['upi-s1h']

    joint_mapper_h36m = constants.H36M_TO_J17 if dataset_name == 'mpi-inf-3dhp-test' else constants.H36M_TO_J14
    joint_mapper_gt = constants.J24_TO_J17 if dataset_name == 'mpi-inf-3dhp-test' else constants.J24_TO_J14
    # Iterate over the entire dataset
    for step, batch in enumerate(tqdm(data_loader, desc='Eval', total=len(data_loader))):
        # Get ground truth annotations from the batch

        imgName = batch['imgname'][0]
        seqName = os.path.basename ( os.path.dirname(imgName) )

        gt_pose = batch['pose'].to(device)
        gt_betas = batch['betas'].to(device)
        gt_vertices = smpl_neutral(betas=gt_betas, body_pose=gt_pose[:, 3:], global_orient=gt_pose[:, :3]).vertices
        images = batch['img'].to(device)
        gender = batch['gender'].to(device)
        curr_batch_size = images.shape[0]

        # gt_bbox_scale = batch['scale'].cpu().numpy()
        # gt_bbox_center = batch['center'].cpu().numpy()
        
        with torch.no_grad():
            pred_rotmat, pred_betas, pred_camera = model(images)
            pred_output = smpl_neutral(betas=pred_betas, body_pose=pred_rotmat[:,1:], global_orient=pred_rotmat[:,0].unsqueeze(1), pose2rot=False)
            pred_vertices = pred_output.vertices

        if save_results:
            rot_pad = torch.tensor([0,0,1], dtype=torch.float32, device=device).view(1,3,1)
            rotmat = torch.cat((pred_rotmat.view(-1, 3, 3), rot_pad.expand(curr_batch_size * 24, -1, -1)), dim=-1)
            pred_pose = tgm.rotation_matrix_to_angle_axis(rotmat).contiguous().view(-1, 72)
            smpl_pose[step * batch_size:step * batch_size + curr_batch_size, :] = pred_pose.cpu().numpy()
            smpl_betas[step * batch_size:step * batch_size + curr_batch_size, :]  = pred_betas.cpu().numpy()
            smpl_camera[step * batch_size:step * batch_size + curr_batch_size, :]  = pred_camera.cpu().numpy()

    
        # 3D pose evaluation
        if eval_pose:
            # Regressor broadcasting
            J_regressor_batch = J_regressor[None, :].expand(pred_vertices.shape[0], -1, -1).to(device)
            # Get 14 ground truth joints
            if 'h36m' in dataset_name or 'mpi-inf' in dataset_name:
                gt_keypoints_3d = batch['pose_3d'].cuda()
                gt_keypoints_3d = gt_keypoints_3d[:, joint_mapper_gt, :-1]
            # For 3DPW get the 14 common joints from the rendered shape
            else:
                gt_vertices = smpl_male(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices 
                gt_vertices_female = smpl_female(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices 

                if seqName=='val2014':
                    gt_vertices_neutral = smpl_neutral(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices 
                    gt_vertices = gt_vertices_neutral
                else:
                    gt_vertices[gender==1, :, :] = gt_vertices_female[gender==1, :, :]
                gt_keypoints_3d = torch.matmul(J_regressor_batch, gt_vertices)
                gt_pelvis = gt_keypoints_3d[:, [0],:].clone()
                gt_keypoints_3d = gt_keypoints_3d[:, joint_mapper_h36m, :]
                gt_keypoints_3d = gt_keypoints_3d - gt_pelvis             


            # Get 14 predicted joints from the mesh
            pred_keypoints_3d = torch.matmul(J_regressor_batch, pred_vertices)
            if save_results:
                pred_joints[step * batch_size:step * batch_size + curr_batch_size, :, :]  = pred_keypoints_3d.cpu().numpy()
            pred_pelvis = pred_keypoints_3d[:, [0],:].clone()
            pred_keypoints_3d = pred_keypoints_3d[:, joint_mapper_h36m, :]
            pred_keypoints_3d = pred_keypoints_3d - pred_pelvis 

            # Absolute error (MPJPE)
            error = torch.sqrt(((pred_keypoints_3d - gt_keypoints_3d) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()

            error_upper = torch.sqrt(((pred_keypoints_3d - gt_keypoints_3d) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
            # mpjpe[step * batch_size:step * batch_size + curr_batch_size] = error

            # Reconstruction error
            r_error = reconstruction_error(pred_keypoints_3d.cpu().numpy(), gt_keypoints_3d.cpu().numpy(), reduction=None)

            r_error_upper = reconstruction_error(pred_keypoints_3d.cpu().numpy(), gt_keypoints_3d.cpu().numpy(), reduction=None)
            # recon_err[step * batch_size:step * batch_size + curr_batch_size] = r_error

            #Visualize GT vs prediction
            if False:
                from renderer import viewer2D
                from renderer import glViewer
                import humanModelViewer

                gt_cam_param = batch['cam_param'].cpu().numpy()
                pred_cam_param = pred_camera.detach().cpu().numpy()

                batchNum = gt_pose.shape[0]
                for i in range(batchNum):
                    curImgVis = deNormalizeBatchImg(images[i].cpu())
                    viewer2D.ImShow(curImgVis, name='rawIm', scale=1.0)

                    #move mesh to bbox space
                    vert_gt = gt_vertices[i].cpu().numpy()
                    vert_gt = convert_smpl_to_bbox(vert_gt, gt_cam_param[i][0], gt_cam_param[i][1:])

                    vert_pred = pred_vertices[i].cpu().numpy()
                    vert_pred = convert_smpl_to_bbox(vert_pred, pred_cam_param[i][0], pred_cam_param[i][1:])

                    smpl_face = humanModelViewer.GetSMPLFace()
                    # meshes_gt = {'ver': gt_vertices[i].cpu().numpy()*100, 'f': smpl_face, 'color': (255,0,0)}
                    # meshes_pred = {'ver': pred_vertices[i].cpu().numpy()*100, 'f': smpl_face, 'color': (0,255,0)}

                    meshes_gt = {'ver': vert_gt, 'f': smpl_face, 'color': (200,50,50)}
                    meshes_pred = {'ver': vert_pred, 'f': smpl_face, 'color': (50,200,50)}

                    glViewer.setMeshData([meshes_gt, meshes_pred], bComputeNormal= True)
                    glViewer.setBackgroundTexture(curImgVis)       #Vis raw video as background
                    glViewer.setWindowSize(curImgVis.shape[1]*5, curImgVis.shape[0]*5)
                    glViewer.SetOrthoCamera(True)
                    glViewer.show(0)
                    

            for ii, p in enumerate(batch['imgname'][:len(r_error)]):
                seqName = os.path.basename( os.path.dirname(p))
                # quant_mpjpe[step * batch_size:step * batch_size + curr_batch_size] = error
                if seqName not in quant_mpjpe.keys():
                    quant_mpjpe[seqName] = []
                    quant_recon_err[seqName] = []
                
                quant_mpjpe[seqName].append(error[ii]) 
                quant_recon_err[seqName].append(r_error[ii])


            #Visualize GT mesh and Pred Skeleton
            if False:
                from renderer import viewer2D
                from renderer import glViewer
                import humanModelViewer

                gt_keypoints_3d_vis = gt_keypoints_3d.cpu().numpy()
                gt_keypoints_3d_vis = np.reshape(gt_keypoints_3d_vis, (gt_keypoints_3d_vis.shape[0],-1))        #N,14x3
                gt_keypoints_3d_vis = np.swapaxes(gt_keypoints_3d_vis, 0,1) *100

                pred_keypoints_3d_vis = pred_keypoints_3d.cpu().numpy()
                pred_keypoints_3d_vis = np.reshape(pred_keypoints_3d_vis, (pred_keypoints_3d_vis.shape[0],-1))        #N,14x3
                pred_keypoints_3d_vis = np.swapaxes(pred_keypoints_3d_vis, 0,1) *100
                # output_sample = output_sample[ : , np.newaxis]*0.1
                # gt_sample = gt_sample[: , np.newaxis]*0.1
                # (skelNum, dim, frames)
                for f in range(gt_keypoints_3d_vis.shape[1]):
                    glViewer.setSkeleton( [gt_keypoints_3d_vis[:,[f]], pred_keypoints_3d_vis[:,[f]]] ,jointType='smplcoco')#(skelNum, dim, frames)
                    glViewer.show(0)


            # Reconstruction error
            # quant_recon_err[step * batch_size:step * batch_size + curr_batch_size] = r_error

            list_mpjpe = np.hstack([ quant_mpjpe[k] for k in quant_mpjpe])
            list_reconError = np.hstack([ quant_recon_err[k] for k in quant_recon_err])
            if bVerbose:
                print(">>> {} : MPJPE {:.02f} mm, error: {:.02f} mm | Total MPJPE {:.02f} mm, error {:.02f} mm".format(seqName, np.mean(error)*1000, np.mean(r_error)*1000, np.hstack(list_mpjpe).mean()*1000, np.hstack(list_reconError).mean()*1000) )

            # print("MPJPE {}, error: {}".format(np.mean(error)*100, np.mean(r_error)*100))

        # If mask or part evaluation, render the mask and part images
        # if eval_masks or eval_parts:
        #     mask, parts = renderer(pred_vertices, pred_camera)
        # NOTE: PartRenderer is commented out above, so `mask` and `parts` are undefined here;
        #       the eval_masks / eval_parts branches below require re-enabling it.

        # Mask evaluation (for LSP)
        if eval_masks:
            center = batch['center'].cpu().numpy()
            scale = batch['scale'].cpu().numpy()
            # Dimensions of original image
            orig_shape = batch['orig_shape'].cpu().numpy()
            for i in range(curr_batch_size):
                # After rendering, convert image back to original resolution
                pred_mask = uncrop(mask[i].cpu().numpy(), center[i], scale[i], orig_shape[i]) > 0
                # Load gt mask
                gt_mask = cv2.imread(os.path.join(annot_path, batch['maskname'][i]), 0) > 0
                # Evaluation consistent with the original UP-3D code
                accuracy += (gt_mask == pred_mask).sum()
                pixel_count += np.prod(np.array(gt_mask.shape))
                for c in range(2):
                    cgt = gt_mask == c
                    cpred = pred_mask == c
                    tp[c] += (cgt & cpred).sum()
                    fp[c] +=  (~cgt & cpred).sum()
                    fn[c] +=  (cgt & ~cpred).sum()
                f1 = 2 * tp / (2 * tp + fp + fn)

        # Part evaluation (for LSP)
        if eval_parts:
            center = batch['center'].cpu().numpy()
            scale = batch['scale'].cpu().numpy()
            orig_shape = batch['orig_shape'].cpu().numpy()
            for i in range(curr_batch_size):
                pred_parts = uncrop(parts[i].cpu().numpy().astype(np.uint8), center[i], scale[i], orig_shape[i])
                # Load gt part segmentation
                gt_parts = cv2.imread(os.path.join(annot_path, batch['partname'][i]), 0)
                # Evaluation consistent with the original UP-3D code
                # 6 parts + background
                for c in range(7):
                   cgt = gt_parts == c
                   cpred = pred_parts == c
                   cpred[gt_parts == 255] = 0
                   parts_tp[c] += (cgt & cpred).sum()
                   parts_fp[c] +=  (~cgt & cpred).sum()
                   parts_fn[c] +=  (cgt & ~cpred).sum()
                gt_parts[gt_parts == 255] = 0
                pred_parts[pred_parts == 255] = 0
                parts_f1 = 2 * parts_tp / (2 * parts_tp + parts_fp + parts_fn)
                parts_accuracy += (gt_parts == pred_parts).sum()
                parts_pixel_count += np.prod(np.array(gt_parts.shape))

        # Print intermediate results during evaluation
        if bVerbose:
            if step % log_freq == log_freq - 1:
                if eval_pose:
                    # NOTE: mpjpe / recon_err are never filled in this version (the assignments above
                    # are commented out), so these prints stay 0; see quant_mpjpe / quant_recon_err instead
                    print('MPJPE: ' + str(1000 * mpjpe[:step * batch_size].mean()))
                    print('Reconstruction Error: ' + str(1000 * recon_err[:step * batch_size].mean()))
                    print()
                if eval_masks:
                    print('Accuracy: ', accuracy / pixel_count)
                    print('F1: ', f1.mean())
                    print()
                if eval_parts:
                    print('Parts Accuracy: ', parts_accuracy / parts_pixel_count)
                    print('Parts F1 (BG): ', parts_f1[[0,1,2,3,4,5,6]].mean())
                    print()

        # if step==3:     #Debug
        #     break
    # Save reconstructions to a file for further processing
    if save_results:
        np.savez(result_file, pred_joints=pred_joints, pose=smpl_pose, betas=smpl_betas, camera=smpl_camera)
    # Print final results during evaluation

    if bVerbose:
        print('*** Final Results ***')
        print()
    

    evalLog ={}

    if eval_pose:
        # if bVerbose:
        #     print('MPJPE: ' + str(1000 * mpjpe.mean()))
        #     print('Reconstruction Error: ' + str(1000 * recon_err.mean()))
        #     print()
        list_mpjpe = np.hstack([ quant_mpjpe[k] for k in quant_mpjpe])
        list_reconError = np.hstack([ quant_recon_err[k] for k in quant_recon_err])

        output_str ='SeqNames; '
        for seq in quant_mpjpe:
            output_str += seq + ';'
        output_str +='\n MPJPE; '
        quant_mpjpe_avg_mm = np.hstack(list_mpjpe).mean()*1000
        output_str += "Avg {:.02f} mm; ".format( quant_mpjpe_avg_mm)
        for seq in quant_mpjpe:
            output_str += '{:.02f}; '.format(1000 * np.hstack(quant_mpjpe[seq]).mean())


        output_str +='\n Recon Error; '
        quant_recon_error_avg_mm = np.hstack(list_reconError).mean()*1000
        output_str +="Avg {:.02f}mm; ".format( quant_recon_error_avg_mm )
        for seq in quant_recon_err:
            output_str += '{:.02f}; '.format(1000 * np.hstack(quant_recon_err[seq]).mean())
        if bVerbose:
            print(output_str)
        else:
            print(">>>  Test on 3DPW: MPJPE: {} | quant_recon_error_avg_mm: {}".format(quant_mpjpe_avg_mm, quant_recon_error_avg_mm) )

        #Save output to dict
        # evalLog['checkpoint']= args.checkpoint
        evalLog['testdb'] = dataset_name
        evalLog['datasize'] = len(data_loader.dataset)
        
        for seq in quant_mpjpe:
            quant_mpjpe[seq] = 1000 * np.hstack(quant_mpjpe[seq]).mean()
        for seq in quant_recon_err:
            quant_recon_err[seq] = 1000 * np.hstack(quant_recon_err[seq]).mean()

        evalLog['quant_mpjpe'] = quant_mpjpe              #MPJPE
        evalLog['quant_recon_err']= quant_recon_err   #PA-MPJPE
        evalLog['quant_output_logstr']= output_str   #PA-MPJPE
        

        evalLog['quant_mpjpe_avg_mm'] = quant_mpjpe_avg_mm              #MPJPE
        evalLog['quant_recon_error_avg_mm']= quant_recon_error_avg_mm   #PA-MPJPE
       
        # return quant_mpjpe_avg_mm, quant_recon_error_avg_mm, evalLog
        return evalLog

    if bVerbose:
        if eval_masks:
            print('Accuracy: ', accuracy / pixel_count)
            print('F1: ', f1.mean())
            print()
        if eval_parts:
            print('Parts Accuracy: ', parts_accuracy / parts_pixel_count)
            print('Parts F1 (BG): ', parts_f1[[0,1,2,3,4,5,6]].mean())
            print()

        
    return -1       #Should return something
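A hedged invocation sketch for run_evaluation; the hmr and BaseDataset constructors follow the SPIN-style convention used by eft but are assumptions here, and checkpoint_path is a placeholder. The returned keys match the evalLog built above.

# Hypothetical invocation sketch; hmr / BaseDataset / checkpoint_path are assumptions
import torch

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = hmr(config.SMPL_MEAN_PARAMS).to(device)                      # assumed regressor constructor
model.load_state_dict(torch.load(checkpoint_path)['model'], strict=False)
model.eval()
dataset = BaseDataset(None, '3dpw', is_train=False)                  # assumed dataset wrapper
eval_log = run_evaluation(model, '3dpw', dataset, result_file=None,
                          batch_size=32, log_freq=50, bVerbose=True)
print(eval_log['quant_mpjpe_avg_mm'], eval_log['quant_recon_error_avg_mm'])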
Code example #6
def exportOursToSpin(out_path):

    scaleFactor = 1.2

    imgDir = '/run/media/hjoo/disk/data/panoptic_mtc/a4_release/hdImgs/'

    with open(
            '/run/media/hjoo/disk/data/panoptic_mtc/a4_release/annotation.pkl',
            'rb') as f:
        data = pickle.load(f)

    with open(
            '/run/media/hjoo/disk/data/panoptic_mtc/a4_release/camera_data.pkl',
            'rb') as f:
        cam = pickle.load(f)

    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []

    rhand_2d_list, rhand_3d_list, lhand_2d_list, lhand_3d_list = [], [], [], []

    for training_testing, mode_data in data.items():
        len_mode = len(mode_data)
        for i, sample in enumerate(tqdm(mode_data)):
            seqName = sample['seqName']
            # print(seqName)
            frame_str = sample['frame_str']
            frame_path = '{}/{}'.format(seqName, frame_str)

            assert 'body' in sample

            if 'right_hand' not in sample or 'left_hand' not in sample:
                continue

            body_landmark = np.array(sample['body']['landmarks']).reshape(
                -1, 3)  #19, 3          #SMC19 order

            rhand_landmark = np.array(
                sample['right_hand']['landmarks']).reshape(
                    -1, 3)  #21, 3
            lhand_landmark = np.array(
                sample['left_hand']['landmarks']).reshape(
                    -1, 3)  #21, 3

            # randomly project the skeleton to a viewpoint

            for c in range(0, 30):

                if c in [1, 2, 4, 6, 7, 13, 17, 19, 28]:  #Exclude top views
                    continue

                calib_data = cam[seqName][c]

                skeleton_3d_camview = applyExtrinsic(body_landmark,
                                                     calib_data)  #19,3
                rhand_3d_camview = applyExtrinsic(rhand_landmark,
                                                  calib_data)  #21,3
                lhand_3d_camview = applyExtrinsic(lhand_landmark,
                                                  calib_data)  #21,3

                skeleton_2d = project2D(body_landmark, calib_data)  #19,2
                rhand_2d = project2D(rhand_landmark, calib_data)  #21,2
                lhand_2d = project2D(lhand_landmark, calib_data)  #21,2

                imgName = os.path.join(frame_path,
                                       '00_{:02d}_{}.jpg'.format(c, frame_str))

                # print(imgName)
                imgFullPath = os.path.join(imgDir, imgName)
                if os.path.exists(imgFullPath) == False:
                    continue
                # print(imgName)

                #Visualize 3D
                if False:
                    img = cv2.imread(imgFullPath)
                    # img = viewer2D.Vis_Skeleton_2D_SMC19(skeleton_2d, image=img)
                    # viewer2D.ImShow(img, waitTime=1)

                    skeleton_3d_camview = skeleton_3d_camview.ravel()[:, np.newaxis]
                    rhand_3d_camview = rhand_3d_camview.ravel()[:, np.newaxis]
                    lhand_3d_camview = lhand_3d_camview.ravel()[:, np.newaxis]
                    glViewer.setSkeleton([
                        skeleton_3d_camview, rhand_3d_camview, lhand_3d_camview
                    ])

                    glViewer.setBackgroundTexture(img)
                    glViewer.SetOrthoCamera(True)
                    glViewer.show(0)

                min_pt = np.min(skeleton_2d, axis=0)
                min_pt[0] = max(min_pt[0], 0)
                min_pt[1] = max(min_pt[1], 0)

                max_pt = np.max(skeleton_2d, axis=0)
                max_pt[0] = min(max_pt[0], 1920)
                max_pt[1] = min(max_pt[1], 1080)
                # bbox= [ min_pt[0], min_pt[1], max_pt[0], max_pt[1] ]
                bbox = [
                    min_pt[0], min_pt[1], max_pt[0] - min_pt[0],
                    max_pt[1] - min_pt[1]
                ]

                center = [bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2]
                scale = scaleFactor * max(bbox[2], bbox[3]) / 200

                #Save data
                # imgnames_.append(os.path.join('train',fileName_saved))
                # imgnames_.append(os.path.join('train',fileName_saved))
                imgnames_.append(imgName)
                openposes_.append(np.zeros([25, 3]))  #blank
                centers_.append(center)
                scales_.append(scale)
                # has_smpl_.append(1)
                # poses_.append(sample['pose3DParam']['pose'])        #(72,)
                # shapes_.append(sample['pose3DParam']['shape'])       #(10,)

                #2D keypoints (total26 -> SPIN24)
                poseidx_spin24 = [
                    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14, 12, 19, 20, 22,
                    21, 23
                ]
                poseidx_smc19 = [
                    14, 13, 12, 6, 7, 8, 11, 10, 9, 3, 4, 5, 2, 0, 1, 15, 16,
                    17, 18
                ]
                part = np.zeros([24, 3])
                part[poseidx_spin24, :2] = skeleton_2d[
                    poseidx_smc19]  #map SMC19 2D joints into SPIN24 slots
                part[poseidx_spin24, 2] = 1
                parts_.append(part)

                #3D joint
                S = np.zeros([24, 4])
                S[poseidx_spin24, :3] = skeleton_3d_camview[
                    poseidx_smc19, :] * 0.01  #Scaling skeleton 3D (currently cm) -> meter
                S[poseidx_spin24, 3] = 1

                skel3D_.append(S)

                rhand_2d_list.append(rhand_2d)
                rhand_3d_list.append(rhand_3d_camview * 0.01)

                lhand_2d_list.append(lhand_2d)
                lhand_3d_list.append(lhand_3d_camview * 0.01)

                #Add hand joints

                #Debug 2D Visualize
                if False:
                    img = cv2.imread(imgFullPath)
                    # img = cv2.imread( os.path.join( '/run/media/hjoo/disk/data/mpii_human_pose_v1/images',imgnames_[-1]) )
                    img = viewer2D.Vis_Skeleton_2D_SMC19(skeleton_2d,
                                                         image=img)
                    img = viewer2D.Vis_Skeleton_2D_Hand(rhand_2d, image=img)
                    img = viewer2D.Vis_Skeleton_2D_Hand(lhand_2d, image=img)

                    img = viewer2D.Vis_Bbox_minmaxPt(img, min_pt, max_pt)
                    viewer2D.ImShow(img, waitTime=0)

                #Debug 3D Visualize smpl_coco
                if False:
                    # data3D_coco_vis = np.reshape(data3D_coco, (data3D_coco.shape[0],-1)).transpose()   #(Dim, F)
                    # data3D_coco_vis *=0.1   #mm to cm
                    # glViewer.setSkeleton( [ data3D_coco_vis] ,jointType='smplcoco')
                    # glViewer.show()

                    #Debug 3D Visualize, h36m
                    data3D_h36m_vis = np.reshape(
                        data3D_h36m,
                        (data3D_h36m.shape[0], -1)).transpose()  #(Dim, F)
                    data3D_h36m_vis *= 0.001  #meter to cm

                    # data3D_smpl24 = np.reshape(data3D_smpl24, (data3D_smpl24.shape[0],-1)).transpose()   #(Dim, F)
                    # data3D_smpl24 *=0.1

                    glViewer.setSkeleton([data3D_h36m_vis],
                                         jointType='smplcoco')
                    glViewer.show()

    # print("Final Img Num: {}, Final Sample Num: {}".format( len(set(imgnames_) , len(imgnames_)) ) )
    print("Final Sample Num: {}".format(len(imgnames_)))
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    # out_file = os.path.join(out_path, '1031-mpii3D_train_44257_all.npz')
    out_file = os.path.join(out_path, 'panopticDB.npz')

    print(f"Save to {out_file}")

    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             openpose=openposes_,
             S=skel3D_,
             rhand_3d=rhand_3d_list,
             rhand_2d=rhand_2d_list,
             lhand_3d=lhand_3d_list,
             lhand_2d=lhand_2d_list)
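The exported npz can be read back with numpy; the field names below match the np.savez call above, and the file path is whatever out_path was given.

# Sketch: loading panopticDB.npz back; field names match the np.savez call above
import numpy as np

db = np.load('panopticDB.npz', allow_pickle=True)
print(len(db['imgname']), 'samples')
part = db['part'][0]          # (24, 3) 2D keypoints in SPIN24 order, last column is confidence
S = db['S'][0]                # (24, 4) 3D joints in meters, last column is validity
rhand_3d = db['rhand_3d'][0]  # (21, 3) right-hand 3D joints in meters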
Code example #7
def pw3d_extract(dataset_path, out_path):

    pw3d_multicrop_info = {}
    # pw3d_crop_info[ (image_name, pid)]
    # E.g., ('downtown_sitOnStairs_00/image_00000.jpg', 0)
    # return an array with 8 level of bbox (0 face, 7 whole body)
    # bbox_list[0]
    # {'bbox_xyhw': [995.412413595738, 374.69671840965594, 98.54587305319353, 81.94162583240131], 'center': [1044.6853501223347, 415.6675313258566], 'ratio_bbox_over_face': 1.0, 'scale': 0.5912752383191612}

    # scale factor
    scaleFactor = 1.2

    # structs we use
    imgnames_, scales_, centers_, parts_ = [], [], [], []
    poses_, shapes_, genders_ = [], [], []
    multilevel_bboxinfo_ = []
    # get a list of .pkl files in the directory
    dataset_path = os.path.join(dataset_path, 'sequenceFiles', 'test')
    files = [
        os.path.join(dataset_path, f) for f in os.listdir(dataset_path)
        if f.endswith('.pkl')
    ]
    # go through all the .pkl files
    for filename in files:
        with open(filename, 'rb') as f:
            print(f"processing: {filename}")
            data = pickle.load(f, encoding='latin1')
            smpl_pose = data['poses']
            smpl_betas = data['betas']
            poses2d = data['poses2d']  #(N, 3, 18)
            global_poses = data['cam_poses']
            genders = data['genders']
            valid = np.array(data['campose_valid']).astype(bool)  # np.bool is deprecated/removed in recent NumPy

            # if False:        #Temporal. To export all 3DPW data
            #     for ii in range(len(valid)):
            #         valid[ii][:] =True

            num_people = len(smpl_pose)
            num_frames = len(smpl_pose[0])
            seq_name = str(data['sequence'])
            img_names = np.array([
                'imageFiles/' + seq_name + '/image_%s.jpg' % str(i).zfill(5)
                for i in range(num_frames)
            ])
            smpl_trans = data['trans']

            # get through all the people in the sequence
            for p_id in range(num_people):

                valid_pose = smpl_pose[p_id][valid[p_id]]
                valid_betas = np.tile(smpl_betas[p_id][:10].reshape(1, -1),
                                      (num_frames, 1))
                valid_betas = valid_betas[valid[p_id]]
                valid_keypoints_2d = poses2d[p_id][valid[p_id]]
                valid_img_names = img_names[valid[p_id]]
                valid_global_poses = global_poses[valid[p_id]]
                valid_smpl_trans = smpl_trans[p_id][valid[p_id]]

                gender = genders[p_id]

                assert (gender == 'm')

                # consider only valid frames
                for valid_i in tqdm(range(valid_pose.shape[0])):
                    part = valid_keypoints_2d[valid_i, :, :].T
                    cur_img_name = valid_img_names[valid_i]

                    #Disable lower bodies (openpose COCO18 index)
                    # part[ [9,10,12,13], 2] = 0      #Upper body only  by ignoring
                    # bHeadOnly = False
                    # if bHeadOnly:
                    #     part[ [4,7, 3,6, 8, 11], 2] = 0

                    target_joint = [
                        10, 9, 8, 11, 12, 13, 4, 3, 2, 5, 6, 7, 0, 0
                    ]  #From VIBE
                    valid_joint_cnt = part[target_joint, 2] > 0.3
                    valid_joint_cnt[12:] = 0

                    if vibe_protocol and sum(
                            valid_joint_cnt) <= 6:  #Following VIBE's protocol
                        # reject_cnt+=1
                        continue

                    part = part[part[:, 2] > 0, :]
                    bbox = [
                        min(part[:, 0]),
                        min(
                            part[:, 1]
                        ),  #Tight bbox from keypoint, minX, minY, maxX, maxY
                        max(part[:, 0]),
                        max(part[:, 1])
                    ]
                    # if bHeadOnly:       #To cover head top
                    #     bbox[1] -= abs(bbox[3] - bbox[1])
                    center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]
                    scale = scaleFactor * max(bbox[2] - bbox[0],
                                              bbox[3] - bbox[1]) / 200

                    # transform global pose
                    pose = valid_pose[valid_i].copy()
                    extrinsics = valid_global_poses[valid_i][:3, :3]
                    pose[:3] = cv2.Rodrigues(
                        np.dot(extrinsics,
                               cv2.Rodrigues(pose[:3])[0]))[0].T[0]

                    imgnames_.append(valid_img_names[valid_i])
                    centers_.append(center)
                    scales_.append(scale)
                    poses_.append(pose)
                    shapes_.append(valid_betas[valid_i])
                    genders_.append(gender)

                    poseidx_spin24 = [
                        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 19, 20, 21, 22,
                        23
                    ]
                    poseidx_openpose18 = [
                        10, 9, 8, 11, 12, 13, 4, 3, 2, 5, 6, 7, 0, 15, 14, 17,
                        16
                    ]
                    part = np.zeros([24, 3])
                    openpose_pt2d = valid_keypoints_2d[valid_i, :, :].T  #18,3
                    part[poseidx_spin24, :] = openpose_pt2d[
                        poseidx_openpose18, :]
                    part[poseidx_spin24,
                         2] = 1 * (part[poseidx_spin24, 2] > 0.3)
                    parts_.append(part)

                    #2D keypoints (total26 -> SPIN24)
                    if False:

                        imgName = os.path.join(
                            '/run/media/hjoo/disk/data/3dpw',
                            valid_img_names[valid_i])
                        rawImg = cv2.imread(imgName)
                        # viewer2D.ImShow(rawImg)
                        # rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg, bbox[:2], bbox[2:])
                        # rawImg = viewer2D.Vis_Skeleton_2D_Openpose18(openpose_pt2d[:,:2].ravel(), image= rawImg, pt2d_visibility=openpose_pt2d[:,2]>0.2)
                        # rawImg = viewer2D.Vis_Skeleton_2D_Openpose18(openpose_pt2d[:,:2].ravel(), image= rawImg, pt2d_visibility=openpose_pt2d[:,2]>0.2)
                        # rawImg = viewer2D.Vis_Skeleton_2D_SPIN49(openpose_pt2d[:,:2], image= rawImg, pt2d_visibility=openpose_pt2d[:,2]>0.2)
                        # viewer2D.ImShow(rawImg)

                    #Draw Mesh SMPL
                    bDebugVis = False
                    if True:
                        imgName = os.path.join(
                            '/run/media/hjoo/disk/data/3dpw',
                            valid_img_names[valid_i])
                        rawImg = cv2.imread(imgName)

                        bbox_list = []
                        # cam_ext = data['cam_poses'][valid_i]    #4x4
                        cam_int = data['cam_intrinsics']  #3x3

                        import torch
                        import glViewer
                        valid_betas_vis = torch.from_numpy(
                            shapes_[-1][np.newaxis, :]).float()
                        valid_pose_vis = torch.from_numpy(
                            valid_pose[valid_i].copy()[np.newaxis, :]).float()
                        smpl_out = smpl_male(
                            betas=valid_betas_vis,
                            body_pose=valid_pose_vis[:, 3:],
                            global_orient=valid_pose_vis[:, :3])

                        ours_vertices = smpl_out.vertices.detach().cpu().numpy(
                        )[0]
                        ours_vertices += valid_smpl_trans[valid_i]

                        #Projection
                        ver_3d_camview = np.matmul(
                            valid_global_poses[valid_i, :3, :3],
                            ours_vertices.transpose()).transpose(
                            ) + valid_global_poses[valid_i, :3, 3]
                        ver_2d = np.matmul(
                            cam_int, ver_3d_camview.transpose()).transpose()
                        ver_2d[:, 0] = ver_2d[:, 0] / ver_2d[:, 2]
                        ver_2d[:, 1] = ver_2d[:, 1] / ver_2d[:, 2]
                        ver_2d = ver_2d[:, :2]

                        #Find face bbox, tight human bbox
                        bbox_xyxy_full = np.array([
                            min(ver_2d[:, 0]),
                            min(ver_2d[:, 1]),
                            max(ver_2d[:, 0]),
                            max(ver_2d[:, 1])
                        ])

                        # Get face bbox (min size)
                        headVerIdx = g_smpl_facepart['head']
                        headVert = ver_2d[headVerIdx]
                        minPt = [min(headVert[:, 0]), min(headVert[:, 1])]
                        maxPt = [max(headVert[:, 0]), max(headVert[:, 1])]
                        bbox_xyxy_small = np.array(
                            [minPt[0], minPt[1], maxPt[0], maxPt[1]])

                        # rawImg= viewer2D.Vis_Pt2ds(ver_2d,rawImg)
                        # rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg, bbox_xyxy_full[:2], bbox_xyxy_full[2:], color=(255,255,0))
                        # rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg,  bbox_xyxy_small[:2], bbox_xyxy_small[2:] ,color=(255,255,0))

                        #Interpolation
                        minPt_d = bbox_xyxy_full[:2] - bbox_xyxy_small[:2]
                        maxPt_d = bbox_xyxy_full[2:] - bbox_xyxy_small[2:]

                        for i in range(8):
                            crop_level = i
                            # if True:
                            # i = crop_level
                            cur_minPt = bbox_xyxy_small[:2] + minPt_d * i / 7.0
                            cur_maxPt = bbox_xyxy_small[2:] + maxPt_d * i / 7.0

                            bbox_xyhw = [
                                cur_minPt[0], cur_minPt[1],
                                cur_maxPt[0] - cur_minPt[0],
                                cur_maxPt[1] - cur_minPt[1]
                            ]
                            cur_center, cur_scale = conv_bboxinfo_bboxXYHW_to_centerscale(
                                bbox_xyhw)
                            cur_scale *= 1.2  #Scaling factor
                            cur_new_bboxXYXY = conv_bboxinfo_centerscale_to_bboxXYXY(
                                cur_center, cur_scale)

                            if is_export_imgs and crop_level in [7]:  #[1,2,4]:
                                #export cropped image into files
                                """Process rgb image and do augmentation."""
                                cropped_img = crop(rawImg,
                                                   cur_center,
                                                   cur_scale, [224, 224],
                                                   rot=0)
                                # viewer2D.ImShow(cropped_img,waitTime=0,name="cropped")
                                export_img_name = seq_name + '_' + os.path.basename(
                                    imgName)[:-4] + f'_pid{p_id}.jpg'
                                export_img_path = os.path.join(
                                    export_root, f'croplev_{crop_level}',
                                    export_img_name)

                                cv2.imwrite(export_img_path, cropped_img)
                            #Compute face to cur bbox ratio   cur_scale / face_scale
                            if i == 0:
                                ratio_bbox_over_face = 1.0
                            else:
                                ratio_bbox_over_face = cur_scale / bbox_list[
                                    0]['scale']

                            bbox_list.append({
                                "scale": cur_scale,
                                "center": cur_center,
                                "ratio_bbox_over_face": ratio_bbox_over_face,
                                "bbox_xyhw": bbox_xyhw
                            })

                            if bDebugVis:  #Draw full size bbox
                                print(
                                    f"{i}: {cur_scale}, {center}, {ratio_bbox_over_face}"
                                )
                                # tempImg = viewer2D.Vis_Bbox_minmaxPt(rawImg,  cur_minPt, cur_maxPt ,color=(255,255,255))
                                if i in [1, 2, 4]:
                                    tempImg = viewer2D.Vis_Bbox_minmaxPt(
                                        rawImg,
                                        cur_new_bboxXYXY[:2],
                                        cur_new_bboxXYXY[2:],
                                        color=(0, 255, 255))
                                else:
                                    tempImg = viewer2D.Vis_Bbox_minmaxPt(
                                        rawImg,
                                        cur_new_bboxXYXY[:2],
                                        cur_new_bboxXYXY[2:],
                                        color=(255, 0, 0))
                                viewer2D.ImShow(tempImg,
                                                name="bboxGen",
                                                waitTime=0)

                        # viewer2D.ImShow(rawImg)
                        multilevel_bboxinfo_.append(bbox_list)
                        key_name = (cur_img_name[11:], p_id)
                        assert key_name not in pw3d_multicrop_info.keys()
                        pw3d_multicrop_info[key_name] = [
                            dt['bbox_xyhw'] for dt in bbox_list
                        ]
                        # if valid_i==5:
                        #     break

    # store data
    if not os.path.isdir(out_path):
        os.makedirs(out_path)

    multicrop_out_path = os.path.join(
        out_path,
        'pw3d_multicrop_info_sample{}_py2.pkl'.format(len(imgnames_)))
    with open(multicrop_out_path, 'wb') as f:
        pickle.dump(pw3d_multicrop_info, f, protocol=2)
        # pickle.dump(your_object, your_file, protocol=2)

    for level in range(8):
        scales_ = []
        centers_ = []
        for i in range(len(multilevel_bboxinfo_)):
            scales_.append(multilevel_bboxinfo_[i][level]['scale'])
            centers_.append(multilevel_bboxinfo_[i][level]['center'])

        out_file = os.path.join(out_path, f'3dpw_test_multilevel_{level}.npz')

        np.savez(out_file,
                 imgname=imgnames_,
                 center=centers_,
                 scale=scales_,
                 pose=poses_,
                 shape=shapes_,
                 gender=genders_,
                 part=parts_)
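A hedged sketch of reading back the multi-level npz files and the multicrop pickle written above; the exact filenames depend on the run (the pickle name embeds the exported sample count).

# Sketch: reading the multi-level 3DPW exports written above (filenames depend on the run)
import numpy as np
import pickle

db = np.load('3dpw_test_multilevel_7.npz', allow_pickle=True)   # level 7 = whole-body bbox
print(len(db['imgname']), db['center'][0], db['scale'][0])

with open('pw3d_multicrop_info_sampleN_py2.pkl', 'rb') as f:    # N is the exported sample count
    multicrop = pickle.load(f)
# each value is a list of 8 bbox_xyhw entries, from face crop (level 0) to whole body (level 7)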
Code example #8
def visEFT_multiSubjects(renderer):

    bStopForEachSample = args.waitforkeys      #if True, it will wait for any key pressed to move to the next sample
    bShowTurnTable = args.turntable
    
    # inputDir = args.fit_dir
    inputData = args.fit_data
    imgDir = args.img_dir
    smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
    smpl = SMPL(smplModelPath, batch_size=1, create_transl=False)

    if os.path.exists(inputData):
        with open(inputData,'r') as f:
            eft_data = json.load(f)
            print("EFT data: ver {}".format(eft_data['ver']))
            eft_data_all = eft_data['data']
    else:
        print(f"ERROR:: Cannot find EFT data: {inputData}")
        assert False

    #Aggregate all EFT fittings per image
    eft_perimage ={}
    for idx, eft_data in enumerate(eft_data_all):
        #Load
        imageName = eft_data['imageName']
        if imageName not in eft_perimage.keys():
            eft_perimage[imageName] =[]

        eft_perimage[imageName].append(eft_data)


    for imgName in tqdm(eft_perimage):
        eft_data_perimage = eft_perimage[imgName]
        
        renderer.clear_mesh()

        for idx,eft_data in enumerate(eft_data_perimage):
            
            #Get raw image path
            imgFullPath = eft_data['imageName']
            imgName = os.path.basename(imgFullPath)
            imgFullPath =os.path.join(imgDir, imgName)
            if os.path.exists(imgFullPath) ==False:
                print(f"Img path is not valid: {imgFullPath}")
                assert False
            rawImg = cv2.imread(imgFullPath)
            print(f'Input image: {imgFullPath}')

            bbox_scale = eft_data['bbox_scale']
            bbox_center = eft_data['bbox_center']

            pred_camera = np.array(eft_data['parm_cam'])
            pred_betas = np.reshape(np.array( eft_data['parm_shape'], dtype=np.float32), (1,10) )     #(10,)
            pred_betas = torch.from_numpy(pred_betas)

            pred_pose_rotmat = np.reshape( np.array( eft_data['parm_pose'], dtype=np.float32), (1,24,3,3)  )        #(24,3,3)
            pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)
        
            # gt_keypoint_2d = np.reshape( np.array(eft_data['gt_keypoint_2d']), (-1,3))    #(49,3)
            keypoint_2d_validity = eft_data['joint_validity_openpose18']

            #COCO only. Annotation index
            print("COCO annotId: {}".format(eft_data['annotId']))

            #Obtain skeleton and smpl data
            smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
            smpl_vertices = smpl_output.vertices.detach().cpu().numpy() 
            smpl_joints_3d = smpl_output.joints.detach().cpu().numpy() 

            #Crop image
            croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg.copy(), bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

            ########################
            # Visualize
            # Visualize 2D image
            if False:
                viewer2D.ImShow(rawImg, name='rawImg', waitTime=1)      #You should press any key 
                viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

            # Visualization Mesh on raw images
            if True:    
                camParam_scale = pred_camera[0]
                camParam_trans = pred_camera[1:]
                pred_vert_vis = smpl_vertices[0]
                smpl_joints_3d_vis = smpl_joints_3d[0]

                if False:#args.onbbox:      #Always in the original image
                    pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                    smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)
                    renderer.setBackgroundTexture(croppedImg)
                    renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
                else:
                    #Convert SMPL to bbox space first
                    pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                    smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)

                    #From cropped space to original
                    pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 
                    smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                    renderer.setBackgroundTexture(rawImg)
                    renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                v = pred_meshes['ver'] 
                f = pred_meshes['f']

                #Visualize in the original image space
                # renderer.set_mesh(v,f)
                renderer.add_mesh(v,f)

        #Render Mesh on the camera view
        renderer.showBackground(True)
        renderer.setWorldCenterBySceneCenter()
        renderer.setCameraViewMode("cam")
        renderer.display()
        overlaid = renderer.get_screen_color_ibgr()        #Overlay on rawImg
        # viewer2D.ImShow(overlaid,waitTime=1,name="overlaid")

        if bStopForEachSample:
            viewer2D.ImShow(overlaid,waitTime=0,name="overlaid") #waitTime=0 means that it will wait for any key pressed
        else:
            viewer2D.ImShow(overlaid,waitTime=1,name="overlaid")

        #Render Mesh on the rotating view
        if bShowTurnTable:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("free")
            for i in range(90):
                renderer.setViewAngle(i*4,0)
                renderer.display()
                sideImg = renderer.get_screen_color_ibgr()        #Rendered turn-table view
                viewer2D.ImShow(sideImg,waitTime=1,name="turn_table")
            
        if True:    #Save the rendered image to files
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{}.jpg'.format(imgName)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, overlaid)
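The function expects an EFT fitting JSON with the fields accessed above; a hedged sketch of that structure (field names are taken from the code, all values are dummies).

# Hedged sketch of the EFT json structure this function reads; values below are dummies
eft_data_example = {
    'ver': '0.1',
    'data': [{
        'imageName': 'dummy_image.jpg',                           # placeholder name
        'bbox_scale': 1.2,
        'bbox_center': [320.0, 240.0],
        'parm_cam': [0.9, 0.0, 0.0],                              # scale, transX, transY
        'parm_shape': [0.0] * 10,                                 # SMPL betas
        'parm_pose': [[[1, 0, 0], [0, 1, 0], [0, 0, 1]]] * 24,    # 24 rotation matrices
        'joint_validity_openpose18': [1] * 18,
        'annotId': 0,
    }]
}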
Code example #9
File: pennaction.py Project: facebookresearch/eft
def LoadMocap(dbDir, imgRoot):

    blackList = [
        '0001', '0003', '0026', '0034', '1005', '1021', '1024', '1032'
    ]

    from scipy.io import loadmat

    annotations_all = []

    # seqPathList = sorted(glob.glob('{0}/*.json'.format(dbDir)) )
    seqPathList = sorted(glob.glob('{0}/*.mat'.format(dbDir)))

    #Iterate for each person
    for i, gtPath in enumerate(seqPathList):

        imageInfo = []
        seqName = os.path.basename(gtPath)[:4]

        if seqName in blackList:
            continue
        # seqName = os.path.basename(jsonData)[:-5]
        print('{}: {}'.format(seqName, i))

        mocap = loadmat(gtPath)
        # N x 13
        vis = mocap['visibility']

        x = mocap['x']
        y = mocap['y']
        skel2d_tracked = np.dstack((x, y, vis))  #(N, 13, 3)

        subjectId = 'pennaction_{}-id{:03d}'.format(
            seqName, 0)  #only single person in pennaction

        frameLeng = len(skel2d_tracked)
        for idx in range(frameLeng):
            skel2d = skel2d_tracked[idx]  #13,3

            if idx >= len(mocap['bbox']):
                print("out of range for bbox")
                break
            bbox = mocap['bbox'][idx]

            annot = {}  #Current annotation

            annot['keypoints'] = skel2d  #13,3
            annot['subjectId'] = subjectId
            imgPathFull = "{0}/{1}/{2:06d}.jpg".format(imgRoot, seqName,
                                                       idx + 1)

            annot['imgname'] = imgPathFull
            annot['bbox_xyxy'] = bbox

            if False:
                inputImg = cv2.imread(imgPathFull)
                inputImg = viewer2D.Vis_Skeleton_2D_pennaction(skel2d[:, :2],
                                                               skel2d[:, 2],
                                                               image=inputImg)
                inputImg = viewer2D.Vis_Bbox_minmaxPt(inputImg,
                                                      annot['bbox_xyxy'][:2],
                                                      annot['bbox_xyxy'][2:])
                viewer2D.ImShow(inputImg, waitTime=0)

            annotations_all.append(annot)

        # if(np.sum(j2d_validity_coco19)==0):
        #     print("No valid annotations")
        #     continue

    # trackid = annot['track_id']
    # bbox = annot['bbox']
    # image_id = annot['image_id']
    # annot_id = annot['id']
    # keypoints = annot['keypoints']
    #Img path, 2D keypoint, bbox
    return annotations_all
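Each returned entry is a dict holding the 2D keypoints, subject id, image path, and bbox built above. A short hedged consumption sketch; the directory paths are placeholders.

# Hypothetical sketch of consuming LoadMocap's output; the directory paths are assumptions
annots = LoadMocap('/path/to/pennaction/labels', '/path/to/pennaction/frames')
for annot in annots[:3]:
    kp = annot['keypoints']            # (13, 3): x, y, visibility per Penn Action joint
    print(annot['subjectId'], annot['imgname'], annot['bbox_xyxy'], kp.shape)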
Code example #10
File: visEFTFit_gui.py Project: arturojreal/eft
def visEFT_multiSubjects(inputDir, imDir, smplModelDir, bUseSMPLX = False):

    if bUseSMPLX:
        smpl = SMPLX(smplModelDir, batch_size=1, create_transl=False)
    else:
        smpl = SMPL(smplModelDir, batch_size=1, create_transl=False)

    fileList  = listdir(inputDir)       #Check all fitting files

    print(">> Found {} files in the fitting folder {}".format(len(fileList), inputDir))
    totalCnt =0
    erroneousCnt =0
    #Merge sample from the same image

    data_perimage ={}
    for f in sorted(fileList):
        
        if "_init" in f:
            continue
        #Load
        imageName = f[:f.rfind('_')]
        if imageName not in data_perimage.keys():
            data_perimage[imageName] =[]

        data_perimage[imageName].append(f)

    for imgName in data_perimage:

        eftFileNames = data_perimage[imgName]

        meshData =[]
        skelData =[]
        for f in eftFileNames:
            fileFullPath = join(inputDir, f)
            with open(fileFullPath,'rb') as fin:    #use a distinct name; 'f' is the loop variable above
                data = pickle.load(fin)
            imgFullPathOri = data['imageName'][0]
            imgFullPath = os.path.join(imDir, os.path.basename(imgFullPathOri))

            
            if True:    #Additional path checking, if not valid
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri ,1)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri,2)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri, 3 )

            scale = data['scale'][0]
            center = data['center'][0]

            ours_betas = torch.from_numpy(data['pred_shape'])
            ours_pose_rotmat = torch.from_numpy(data['pred_pose_rotmat'])
            # spin_betas = torch.from_numpy(data['opt_beta'])
            
            #Compute 2D reprojection error
            # if not (data['loss_keypoints_2d']<0.0001 or data['loss_keypoints_2d']>0.001 :
            #     continue
            maxBeta = abs(torch.max( abs(ours_betas)).item())

            if data['loss_keypoints_2d']>0.0005 or maxBeta>3:
                erroneousCnt +=1
            
            print(">>> loss2d: {}, maxBeta: {}".format( data['loss_keypoints_2d'],maxBeta) )

            # spin_pose = torch.from_numpy(data['opt_pose'])
            pred_camera_vis = data['pred_camera']
    
            assert os.path.exists(imgFullPath)
            rawImg = cv2.imread(imgFullPath)
            print(imgFullPath)

            croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, center, scale, (constants.IMG_RES, constants.IMG_RES) )

            #Visualize 2D image
            if args.bRenderToFiles ==False:
                viewer2D.ImShow(rawImg, name='rawImg', waitTime=10)      #shown briefly (10ms); no key press needed
                viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=10)

            if bUseSMPLX:
                ours_output = smpl(betas=ours_betas, body_pose=ours_pose_rotmat[:,1:-2], global_orient=ours_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
                # ours_output = smpl()        #Default test
            else:
                ours_output = smpl(betas=ours_betas, body_pose=ours_pose_rotmat[:,1:], global_orient=ours_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
                # ours_output = smpl()        #Default test
            ours_vertices = ours_output.vertices.detach().cpu().numpy() 
            ours_joints_3d = ours_output.joints.detach().cpu().numpy() 

            if False:   #Debugging
                # ours_vertices = ours_vertices - ours_joints_3d[0,12,:]
                save_mesh_obj(ours_vertices[0], smpl.faces, 'test.obj')

            #Visualize 3D mesh and 3D skeleton on original image space
            if True:
                b =0
                camParam_scale = pred_camera_vis[b,0]
                camParam_trans = pred_camera_vis[b,1:]

                ############### Visualize Mesh ############### 
                pred_vert_vis = ours_vertices[b].copy()
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 
                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                # glViewer.setMeshData([pred_meshes], bComputeNormal= True)

                # ################ Visualize Skeletons ############### 
                #Vis pred-SMPL joint
                # pred_joints_vis = ours_joints_3d[b,-9:,:3].copy()     #(N,3)        #Debugging
                pred_joints_vis = ours_joints_3d[b,:,:3].copy()     #(N,3)
                pred_joints_vis = convert_smpl_to_bbox(pred_joints_vis, camParam_scale, camParam_trans)
                pred_joints_vis = convert_bbox_to_oriIm(pred_joints_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 


                meshData.append(pred_meshes)
                skelData.append(pred_joints_vis.ravel()[:,np.newaxis])
                # glViewer.setSkeleton( [pred_joints_vis.ravel()[:,np.newaxis]])

                glViewer.setBackgroundTexture(rawImg)
                glViewer.setWindowSize(rawImg.shape[1]*args.magnifyFactor, rawImg.shape[0]*args.magnifyFactor)
                glViewer.SetOrthoCamera(True)

                # print("Press 'q' in the 3D window to go to the next sample")
                # glViewer.show(0)
        glViewer.setSkeleton(skelData)
        glViewer.setMeshData(meshData, bComputeNormal= True)

        if args.bRenderToFiles:        #Export rendered files
            if os.path.exists(render_dirName) == False:     #make an output folder if necessary
                os.mkdir(render_dirName)
            fileName = imgFullPathOri[:-4].replace("/","_")
            glViewer.render_on_image(render_dirName, fileName, rawImg)
            print(f"render to {fileName}")

        glViewer.show(args.displaytime)
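For reference, a minimal sketch of the per-sample pickle layout that visEFT_multiSubjects assumes; the field names come from the accesses above, the shapes are inferred from the indexing, and the file path is a hypothetical placeholder.

import pickle

with open('/path/to/eft_fit/COCO_train2014_000000000001_0.pkl', 'rb') as f:   # hypothetical file
    fit = pickle.load(f)

print(fit['imageName'][0])                 # original image path
print(fit['center'][0], fit['scale'][0])   # bbox center / scale
print(fit['pred_shape'].shape)             # SMPL betas, e.g. (1, 10)
print(fit['pred_pose_rotmat'].shape)       # per-joint rotation matrices, e.g. (1, 24, 3, 3)
print(fit['pred_camera'].shape)            # weak-perspective camera [s, tx, ty]
print(fit['loss_keypoints_2d'])            # 2D reprojection loss used for filtering above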
Code example #11
0
File: mpii3d.py  Project: facebookresearch/eft
def exportOursToSpin(cocoPose3DAll, out_path):

    scaleFactor = 1.2


    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []

    #additional 3D
    poses_ , shapes_, skel3D_, has_smpl_  = [], [] ,[], []


    # for imgSample in cocoPose3DAll:
    imgNum = len(cocoPose3DAll)
    totalSampleNum = [ len(cocoPose3DAll[imgSample]) for imgSample in cocoPose3DAll ]
    totalSampleNum = sum(totalSampleNum)

    print("\n\n### ImageNum: {}, SampleNum: {} ###".format(imgNum, totalSampleNum))
    # for imgSample in cocoPose3DAll:
    # for key_imgId, imgSample in sorted(cocoPose3DAll.items()):
    for key_imgId, imgSample in sorted(cocoPose3DAll.items()):
        #load image
        imgPathFull = imgSample[0]['imgId']           
        fileName = os.path.basename(imgPathFull)
        fileName_saved = os.path.join(os.path.basename(os.path.dirname(imgPathFull)), fileName) #start from train2014


        for sample in imgSample:

            validJointNum = np.sum(sample['pose2D_validity'][::2])

            if validJointNum<4:
                continue

            if np.isnan(sample['pose3DParam']['camScale']):     
                continue

            gt_skel = np.reshape(sample['pose2D_gt'],(26,-1))       #(26,2) This is from data
            gt_validity = np.reshape(sample['pose2D_validity'],(26,-1))     #(26,2)

            
            # Filtering ########################################################################################################
            if True:
                requiredJoints= [0,1,2, 3,4,5, 6,7,8, 9,10,11]      #In Total26
                if np.min(gt_validity[requiredJoints,0])== False:
                    continue

            min_pt = np.min(gt_skel[gt_validity[:,0]], axis=0)
            max_pt = np.max(gt_skel[gt_validity[:,0]], axis=0)
            # bbox= [ min_pt[0], min_pt[1], max_pt[0], max_pt[1] ]
            bbox= [ min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]


            center = [bbox[0] + bbox[2]/2, bbox[1] + bbox[3]/2]
            scale = scaleFactor*max(bbox[2], bbox[3])/200


            #Save data
            imgnames_.append(os.path.join('images',fileName_saved)) 
            openposes_.append(np.zeros([25,3]))       #blank
            centers_.append(center)
            scales_.append(scale)
            has_smpl_.append(1)
            poses_.append(sample['pose3DParam']['pose'])        #(72,)
            shapes_.append(sample['pose3DParam']['shape'])       #(10,)


            #2D keypoints (total26 -> SPIN24)
            poseidx_spin24 = [0,1,2,  3,4,5, 6,7,8,  9,10,11, 19,20,21,22,23] 
            poseidx_total26 =  [0,1,2,  3,4,5,  6,7,8,  9,10,11,  14, 15, 16, 17, 18  ]
            part = np.zeros([24,3])
            part[poseidx_spin24,:2] = gt_skel[poseidx_total26] #(52,)  totalGT26 type
            part[poseidx_spin24,2] = 1*gt_validity[poseidx_total26,0]   
            parts_.append(part)

            #3D joint
            S = np.zeros([24,4])
            S[poseidx_spin24,:3] = sample['pose3D_pred'][poseidx_total26,:]  * 0.001     #Scaling skeleton 3D (currently mm) -> meter
            S[poseidx_spin24,3] = 1
            
            skel3D_.append(S)

            #Debug 2D Visualize
            if False:
                img = cv2.imread( os.path.join( '/run/media/hjoo/disk/data/mpii_human_pose_v1/images',imgnames_[-1]) )
                img = viewer2D.Vis_Skeleton_2D_smplCOCO(gt_skel, pt2d_visibility = gt_validity[:,0], image =img)
                img = viewer2D.Vis_Bbox_minmaxPt(img, min_pt, max_pt)
                viewer2D.ImShow(img, waitTime=0)  

            #Debug 3D Visualize smpl_coco
            if False:
                # data3D_coco_vis = np.reshape(data3D_coco, (data3D_coco.shape[0],-1)).transpose()   #(Dim, F)
                # data3D_coco_vis *=0.1   #mm to cm
                # glViewer.setSkeleton( [ data3D_coco_vis] ,jointType='smplcoco')
                # glViewer.show()

                #Debug 3D Visualize, h36m
                data3D_h36m_vis = np.reshape(data3D_h36m, (data3D_h36m.shape[0],-1)).transpose()   #(Dim, F)
                data3D_h36m_vis *=0.001   #mm to meter

                # data3D_smpl24 = np.reshape(data3D_smpl24, (data3D_smpl24.shape[0],-1)).transpose()   #(Dim, F)
                # data3D_smpl24 *=0.1

                glViewer.setSkeleton( [ data3D_h36m_vis]  ,jointType='smplcoco')
                glViewer.show()


            # keypoints

    # print("Final Img Num: {}, Final Sample Num: {}".format( len(set(imgnames_) , len(imgnames_)) ) )
    print("Final Sample Num: {}".format( len(imgnames_)))
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    # out_file = os.path.join(out_path, '1031-mpii3D_train_44257_all.npz')
    out_file = os.path.join(out_path, '1031-mpii3D_train_44257_validlimbs.npz')

    np.savez(out_file, imgname=imgnames_,
                       center=centers_,
                       scale=scales_,
                       part=parts_,
                       openpose=openposes_,
                       pose=poses_,
                       shape=shapes_,
                       has_smpl=has_smpl_,
                       S=skel3D_)
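As a quick sanity check, the exported file can be reloaded and the per-field shapes inspected. A minimal sketch, assuming the output path written above; the expected shapes follow from the arrays appended in the loop.

import numpy as np

db = np.load('/path/to/out/1031-mpii3D_train_44257_validlimbs.npz')
print(db.files)

n = len(db['imgname'])
assert db['part'].shape  == (n, 24, 3)   # SPIN24 2D keypoints + validity
assert db['S'].shape     == (n, 24, 4)   # 3D joints (meters) + validity
assert db['pose'].shape  == (n, 72)      # SMPL pose (axis-angle)
assert db['shape'].shape == (n, 10)      # SMPL betas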
Code example #12
0
def multilvel_bbox_crop_gen(rawImg,
                            smpl_vert,
                            fullsize_center,
                            fullsize_scale,
                            bDebug=True):
    """
    Generate bbox from smallest size(face) to full size
    args:
        fullsize_center, fullsize_scale: bbox given by original annotation  (full body or maximum size)
        smpl_vert: 
    """
    bbox_list = []

    bbox_xyxy_full = conv_bboxinfo_centerscale_to_bboxXYXY(
        fullsize_center, fullsize_scale)

    # Get face bbox (min size)
    headVerIdx = g_smpl_facepart['head']
    headVert = smpl_vert[headVerIdx]
    minPt = [
        min(headVert[:, 0]) + rawImg.shape[1] * 0.5,
        min(headVert[:, 1]) + rawImg.shape[0] * 0.5
    ]
    maxPt = [
        max(headVert[:, 0]) + rawImg.shape[1] * 0.5,
        max(headVert[:, 1]) + rawImg.shape[0] * 0.5
    ]
    bbox_xyxy_small = [minPt[0], minPt[1], maxPt[0], maxPt[1]]

    if bDebug:  #Draw full size bbox
        rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg.copy(),
                                            bbox_xyxy_full[:2],
                                            bbox_xyxy_full[2:],
                                            color=(255, 255, 0))
        rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg,
                                            bbox_xyxy_small[:2],
                                            bbox_xyxy_small[2:],
                                            color=(255, 255, 0))
        viewer2D.ImShow(rawImg, name="bboxGen")

    #Interpolation
    minPt_d = bbox_xyxy_full[:2] - bbox_xyxy_small[:2]
    maxPt_d = bbox_xyxy_full[2:] - bbox_xyxy_small[2:]
    for i in range(8):
        cur_minPt = bbox_xyxy_small[:2] + minPt_d * i / 7.0
        cur_maxPt = bbox_xyxy_small[2:] + maxPt_d * i / 7.0

        bbox_xyhw = [
            cur_minPt[0], cur_minPt[1], cur_maxPt[0] - cur_minPt[0],
            cur_maxPt[1] - cur_minPt[1]
        ]
        cur_center, cur_scale = conv_bboxinfo_bboxXYHW_to_centerscale(
            bbox_xyhw)
        #Compute face to cur bbox ratio   cur_scale / face_scale
        if i == 0:
            ratio_bbox_over_face = 1.0
        else:
            ratio_bbox_over_face = cur_scale / bbox_list[0]['scale']

        bbox_list.append({
            "scale": cur_scale,
            "center": cur_center,
            "ratio_bbox_over_face": ratio_bbox_over_face
        })

        if bDebug:  #Draw the current interpolated bbox
            print(f"{i}: {cur_scale}, {cur_center}, {ratio_bbox_over_face}")
            tempImg = viewer2D.Vis_Bbox_minmaxPt(rawImg,
                                                 cur_minPt,
                                                 cur_maxPt,
                                                 color=(255, 255, 255))
            viewer2D.ImShow(tempImg, name="bboxGen", waitTime=0)

    if bDebug:  #Draw all generated bboxes
        for b in bbox_list:
            bbox_xyxy = conv_bboxinfo_centerscale_to_bboxXYXY(
                b['center'], b['scale'])
            vis = viewer2D.Vis_Bbox_minmaxPt(rawImg,
                                             bbox_xyxy[:2],
                                             bbox_xyxy[2:],
                                             color=(0, 255, 255))
            viewer2D.ImShow(vis, name="bboxGen", waitTime=0)

    return bbox_list
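A hypothetical call sketch for the generator above. The variables rawImg, smpl_vert, center and scale are placeholders; it assumes the project's conv_bboxinfo_* helpers and g_smpl_facepart are importable, and that smpl_vert is already expressed in the image-centered frame implied by the head-bbox computation.

# rawImg: BGR image; smpl_vert: (6890,3) SMPL vertices; center/scale: full-body bbox annotation.
bbox_list = multilvel_bbox_crop_gen(rawImg, smpl_vert, center, scale, bDebug=False)

for b in bbox_list:
    # Entries grow from a head-tight crop (ratio 1.0) toward the full-body bbox.
    print(b['scale'], b['center'], b['ratio_bbox_over_face'])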
Code example #13
0
File: pw3d.py  Project: facebookresearch/eft
def pw3d_extract(dataset_path, out_path):

    # scale factor
    scaleFactor = 1.2

    # structs we use
    imgnames_, scales_, centers_, parts_ = [], [], [], []
    poses_, shapes_, genders_ = [], [], []
    subjectIds_ = []

    # get a list of .pkl files in the directory
    dataset_path = os.path.join(dataset_path, 'sequenceFiles', g_traintest)
    files = [
        os.path.join(dataset_path, f) for f in os.listdir(dataset_path)
        if f.endswith('.pkl')
    ]

    reject_cnt = 0
    # go through all the .pkl files
    for filename in files:
        with open(filename, 'rb') as f:
            data = pickle.load(f, encoding='latin1')
            smpl_pose = data['poses']
            smpl_betas = data['betas']
            poses2d = data['poses2d']  #(N, 3, 18)
            global_poses = data['cam_poses']
            genders = data['genders']
            valid = np.array(data['campose_valid']).astype(bool)
            num_people = len(smpl_pose)
            num_frames = len(smpl_pose[0])
            seq_name = str(data['sequence'])
            img_names = np.array([
                'imageFiles/' + seq_name + '/image_%s.jpg' % str(i).zfill(5)
                for i in range(num_frames)
            ])
            # get through all the people in the sequence
            for i in range(num_people):
                p_id = i

                valid_pose = smpl_pose[i][valid[i]]
                valid_betas = np.tile(smpl_betas[i][:10].reshape(1, -1),
                                      (num_frames, 1))
                valid_betas = valid_betas[valid[i]]
                valid_keypoints_2d = poses2d[i][valid[i]]
                valid_img_names = img_names[valid[i]]
                valid_global_poses = global_poses[valid[i]]
                gender = genders[i]
                # consider only valid frames
                for valid_i in range(valid_pose.shape[0]):
                    part = valid_keypoints_2d[valid_i, :, :].T

                    target_joint = [
                        10, 9, 8, 11, 12, 13, 4, 3, 2, 5, 6, 7, 0, 0
                    ]  #From VIBE
                    valid_joint_cnt = part[target_joint, 2] > 0.3
                    valid_joint_cnt[12:] = 0
                    if sum(valid_joint_cnt) <= 6:  #Following VIBE's protocol
                        reject_cnt += 1
                        continue

                    part = part[part[:, 2] > 0, :]
                    bbox = [
                        min(part[:, 0]),
                        min(part[:, 1]),
                        max(part[:, 0]),
                        max(part[:, 1])
                    ]

                    ##Temporary. Export bbox
                    if True:
                        bbox_out_path_root = '/run/media/hjoo/disk/data/3dpw/bbox'
                        img_name = valid_img_names[valid_i]
                        image_full_path = os.path.join(
                            '/run/media/hjoo/disk/data/3dpw', img_name)
                        bbox_xywh = [
                            bbox[0], bbox[1], bbox[2] - bbox[0],
                            bbox[3] - bbox[1]
                        ]
                        bbox_element = {
                            "image_path": image_full_path,
                            "body_bbox_list": [bbox_xywh],
                            "id_list": [p_id]
                        }
                        json_out_path = os.path.join(
                            bbox_out_path_root,
                            img_name[11:].replace('/', '_')[:-4] +
                            f'_pid{p_id}' + '.json')
                        print(f"Saved to {json_out_path}")
                        with open(json_out_path, 'w') as f:
                            json.dump(bbox_element, f)

                    center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]
                    scale = scaleFactor * max(bbox[2] - bbox[0],
                                              bbox[3] - bbox[1]) / 200

                    # transform global pose
                    pose = valid_pose[valid_i]  #(72,)
                    extrinsics = valid_global_poses[valid_i][:3, :3]
                    pose[:3] = cv2.Rodrigues(
                        np.dot(extrinsics,
                               cv2.Rodrigues(pose[:3])[0]))[0].T[0]

                    imgnames_.append(valid_img_names[valid_i])
                    centers_.append(center)  #BBox center
                    scales_.append(
                        scale)  #bbox scale (from tight bbox w.r.t 200)
                    poses_.append(pose)  #,72
                    shapes_.append(valid_betas[valid_i])
                    genders_.append(gender)
                    subjectIds_.append(p_id)

                    poseidx_spin24 = [
                        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 19, 20, 21, 22,
                        23
                    ]
                    poseidx_openpose18 = [
                        10, 9, 8, 11, 12, 13, 4, 3, 2, 5, 6, 7, 0, 15, 14, 17,
                        16
                    ]
                    part = np.zeros([24, 3])
                    openpose_pt2d = valid_keypoints_2d[valid_i, :, :].T  #18,3
                    part[poseidx_spin24, :] = openpose_pt2d[
                        poseidx_openpose18, :]
                    part[poseidx_spin24,
                         2] = 1 * (part[poseidx_spin24, 2] > 0.3)
                    parts_.append(part)

                    #2D keypoints (total26 -> SPIN24)
                    if False:
                        from renderer import viewer2D
                        imgName = os.path.join(
                            '/run/media/hjoo/disk/data/3dpw',
                            valid_img_names[valid_i])
                        rawImg = cv2.imread(imgName)
                        # viewer2D.ImShow(rawImg)

                        rawImg = viewer2D.Vis_Skeleton_2D_Openpose18(
                            openpose_pt2d[:, :2].ravel(),
                            image=rawImg,
                            pt2d_visibility=openpose_pt2d[:, 2] > 0.2)
                        # rawImg = viewer2D.Vis_Skeleton_2D_SPIN49(openpose_pt2d[:,:2], image= rawImg, pt2d_visibility=openpose_pt2d[:,2]>0.2)
                        viewer2D.ImShow(rawImg)

    print("reject_cnt: {}, valid_cnt:{}".format(reject_cnt, len(imgnames_)))
    sampleNum = len(imgnames_)
    # store data
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path,
                            f'3dpw_{g_traintest}_{sampleNum}_subjId.npz')
    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             pose=poses_,
             shape=shapes_,
             gender=genders_,
             subjectIds=subjectIds_,
             part=parts_)
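The global-orientation update above composes the camera extrinsic rotation with the root rotation via Rodrigues vectors. A self-contained sketch of that single step, using made-up values in place of the 3DPW data:

import cv2
import numpy as np

root_aa = np.array([0.1, -0.3, 0.2])                               # stands in for pose[:3]
extrinsic_R = cv2.Rodrigues(np.array([0.0, np.pi / 8, 0.0]))[0]    # stands in for valid_global_poses[...][:3,:3]

root_R = cv2.Rodrigues(root_aa)[0]                             # axis-angle -> rotation matrix
root_aa_cam = cv2.Rodrigues(extrinsic_R.dot(root_R))[0].T[0]   # compose, then back to axis-angle
print(root_aa_cam)                                             # new pose[:3], expressed in the camera frame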
Code example #14
0
def LoadMocap(dbDir, imgRoot):

    # skel2DAll_coco_ori = {}
    # skel2DAll_coco_cropped = {}
    # skel2DAll_coco_validity = {}
    # imageInfoAll = {}   #Image path (seqName/image_%05d.jpg), bbox
    # cropInfoAll = {}
    # bbrInfoAll ={}
    annotations_all = []

    seqPathList = sorted(glob.glob('{0}/*.json'.format(dbDir)))

    #Iterate for each person
    for i, jsonData in enumerate(seqPathList):

        imageInfo = []
        seqName = os.path.basename(jsonData)[:-5]
        print('{}: {}'.format(seqName, i))

        coco = COCO(jsonData)
        img_ids = coco.getImgIds()
        imgs = coco.loadImgs(img_ids)

        posetrack_images = []
        for img in imgs:
            # if img['vid_id'] != '000015':  # Uncomment to filter for a specific sequence.
            #     continue
            if img['is_labeled']:
                posetrack_images.append(img)

        print("valid img num: {}".format(len(posetrack_images)))

        #get track_id min, max (to track each person one by one)
        minTrackId = 10000
        maxTrackId = -10000
        for image_idx, selected_im in enumerate(posetrack_images):
            ann_ids = coco.getAnnIds(imgIds=selected_im['id'])
            anns = coco.loadAnns(ann_ids)

            for ann_idx, ann in enumerate(anns):
                minTrackId = min(minTrackId, ann['track_id'])
                maxTrackId = max(maxTrackId, ann['track_id'])
        # print("seq:{}, minId{}, maxId{}".format(seqName,minTrackId, maxTrackId))

        trackIds = range(minTrackId, maxTrackId + 1)
        for trackId in trackIds:
            skel2dList = []
            imageInfo = []

            #Check all images and find the currently tracked one
            for image_idx, selected_im in enumerate(posetrack_images):
                imgName = selected_im['file_name']
                ann_ids = coco.getAnnIds(imgIds=selected_im['id'])
                anns = coco.loadAnns(ann_ids)

                #Find the currently tracked one
                for ann_idx, ann in enumerate(anns):
                    if (trackId != ann['track_id']):
                        continue

                    if not ('keypoints' in ann
                            and type(ann['keypoints']) == list):
                        continue

                    if 'bbox' not in ann.keys():
                        continue

                    kp = np.array(ann['keypoints'])  #(51,)
                    kp = np.reshape(kp, (-1, 3))  #(17,3)

                    validityCnt = np.sum(kp[:, -1])
                    if validityCnt < 1:
                        continue

                    assert kp.shape[0] == 17
                    # bbox = np.array(ann['bbox'])
                    ann['file_name'] = selected_im['file_name']
                    annotations_all.append(ann)

                    # # Visualize Image and skeletons
                    if False:
                        filePath = os.path.join(imgRoot,
                                                selected_im['file_name'])
                        inputImg = cv2.imread(filePath)
                        img = viewer2D.Vis_Posetrack(kp[:, :2], image=inputImg)
                        viewer2D.ImShow(img, waitTime=10)
                    break  #No need to find anymore on current image
        # break
        # if len(annotations_all)==100:     #Debug
        #     break

    return annotations_all
Code example #15
0
                rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg, bbox_xyxy[:2],
                                                    bbox_xyxy[2:])

                # croppedImg = crop(rawImg, center, scale,
                #         [constants.IMG_RES, constants.IMG_RES])
                croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(
                    rawImg, center, scale,
                    (constants.IMG_RES, constants.IMG_RES))

                #Visualize image
                if False:
                    rawImg = viewer2D.Vis_Skeleton_2D_SPIN49(
                        data['keypoint2d'][0][:, :2],
                        pt2d_visibility=data['keypoint2d'][0][:, 2],
                        image=rawImg)
                    viewer2D.ImShow(rawImg, name='rawImg')
                    viewer2D.ImShow(croppedImg, name='croppedImg')

                b = 0
                ############### Visualize Mesh ###############
                camParam_scale = pred_camera_vis[b, 0]
                camParam_trans = pred_camera_vis[b, 1:]
                pred_vert_vis = ours_vertices[b].copy()
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis,
                                                     camParam_scale,
                                                     camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis,
                                                      boxScale_o2n,
                                                      bboxTopLeft,
Code example #16
0
def posetrack_extract(cocoPose3DAll, imgRootDir, out_path):

    # posetrack_index = range(17)
    # posetrack_index = np.array([0,1,2,  3,4,5,6,7,8,9,10,11,12,13,14,15,16])   #no maching for head (posetrack(2))
    # posetrack_to_smplCOCO18 = np.array([14, 12,19,  16, 17, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0 ])      #no matching for head

    # convert joints to global order
    # posetrack_index = np.array([0,1,2,  3,4,5,6,7,8,9,10,11,12,13,14,15,16])   #no maching for head (posetrack(2))
    joints_idx = [19, 12, 13, 23, 22, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]

    # bbox expansion factor
    scaleFactor = 1.2

    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_, annot_ids_ = [], [], [], [], [], []
    subjectIds_ = []

    for annot in cocoPose3DAll:
        # keypoints processing
        keypoints = annot['keypoints']
        keypoints = np.reshape(keypoints, (17, 3))
        keypoints[keypoints[:, 2] > 0, 2] = 1
        # check if all major body joints are annotated

        #Change the following to select a subset of coco
        if sum(keypoints[5:, 2] > 0
               ) < 12:  #Original: cases that all body limbs are annotated
            continue
        # if sum(keypoints[5:,2]>0) >= 12:   #If all parts are valid. skip. we already have this
        #     continue
        # if sum(keypoints[5:,2]>0) < 6:   #At least 6 joints should be there
        #     continue
        # image name
        image_id = annot['image_id']
        annot_id = annot['id']

        img_name = str(annot['file_name'])
        img_name_full = img_name  #join(imgRootDir, img_name)
        # keypoints
        part = np.zeros([24, 3])
        part[joints_idx] = keypoints
        # scale and center
        bbox = annot['bbox']  #X,Y,W,H
        center = [bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2]
        scale = scaleFactor * max(bbox[2], bbox[3]) / 200
        openpose = np.zeros([25, 3])  #blank

        if False:  #visualize
            imgPath = os.path.join(imgRootDir, img_name_full)
            raw_img = cv2.imread(imgPath)
            raw_img = viewer2D.Vis_Skeleton_2D_SPIN24(part[:, :2],
                                                      pt2d_visibility=part[:,
                                                                           2],
                                                      image=raw_img)
            # raw_img = viewer2D.Vis_Skeleton_2D_foot(foot_kp[:,:-1], foot_kp[:,-1] , image= raw_img)
            # raw_img = viewer2D.Vis_Skeleton_2D_Openpose25(openpose[:,:-1], openpose[:,-1] , image= raw_img)
            viewer2D.ImShow(raw_img, waitTime=0)

        #Generate a unique human ID:
        seqName = os.path.dirname(img_name_full)
        trackid = annot['track_id']
        subjectid = "{}-id{:03}".format(seqName, trackid)

        # store data
        subjectIds_.append(subjectid)
        imgnames_.append(img_name_full)
        annot_ids_.append(annot_id)
        centers_.append(center)
        scales_.append(scale)
        parts_.append(part)
        openposes_.append(openpose)

    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, 'posetrack.npz')
    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             subjectIds=subjectIds_,
             openpose=openposes_,
             annotIds=annot_ids_)
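The center/scale convention used by this exporter (and the others above) stores the bbox center in pixels and the padded bbox size divided by 200. A minimal round-trip sketch, with the 1.2 padding taken from scaleFactor; the inverse can only return a square box because the aspect ratio is not stored.

def bbox_xywh_to_center_scale(bbox, scale_factor=1.2):
    """[x, y, w, h] -> (center, scale) as stored in the .npz files."""
    center = [bbox[0] + bbox[2] / 2.0, bbox[1] + bbox[3] / 2.0]
    scale = scale_factor * max(bbox[2], bbox[3]) / 200.0
    return center, scale

def center_scale_to_bbox_xywh(center, scale, scale_factor=1.2):
    """Approximate inverse: returns a square [x, y, w, h] box."""
    size = scale * 200.0 / scale_factor
    return [center[0] - size / 2.0, center[1] - size / 2.0, size, size]

center, scale = bbox_xywh_to_center_scale([100, 50, 80, 160])
print(center, scale)                              # [140.0, 130.0] 0.96
print(center_scale_to_bbox_xywh(center, scale))   # [60.0, 50.0, 160.0, 160.0]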
Code example #17
0
def exportOursToSpin(eftDir, out_path):

    # scaleFactor = 1.2

    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []

    pose3DList = os.listdir(eftDir)

    # for imgSample in cocoPose3DAll:
    sampleNum = len(pose3DList)
    # totalSampleNum = [ len(cocoPose3DAll[imgSample]) for imgSample in cocoPose3DAll ]
    # totalSampleNum = sum(totalSampleNum)
    print("\n\n### SampleNum: {} ###".format(sampleNum))

    maxDiff = 0
    for fname in tqdm(sorted(pose3DList)):

        fname_path = os.path.join(eftDir, fname)

        pose3d = pickle.load(open(fname_path, 'rb'))

        #load image
        imgPathFull = pose3d['imageName'][0]
        fileName = os.path.basename(imgPathFull)
        fileName_saved = os.path.join(
            os.path.basename(os.path.dirname(imgPathFull)),
            fileName)  #start from train2014
        center = pose3d['center'][0]
        scale = pose3d['scale'][0]

        smpl_shape = pose3d['pred_shape'].ravel()
        smpl_pose_mat = torch.from_numpy(
            pose3d['pred_pose_rotmat'][0])  #24,3,3
        pred_rotmat_hom = torch.cat([
            smpl_pose_mat.view(-1, 3, 3),
            torch.tensor(
                [0, 0, 0],
                dtype=torch.float32,
            ).view(1, 3, 1).expand(24, -1, -1)
        ],
                                    dim=-1)
        smpl_pose = tgm.rotation_matrix_to_angle_axis(
            pred_rotmat_hom).contiguous().view(-1, 72)

        #verification
        if True:
            recon_mat = batch_rodrigues(smpl_pose.view(
                -1, 3))  #24,3... axis -> rotmat
            diff = abs(recon_mat.numpy() -
                       pose3d['pred_pose_rotmat'][0])  #2.1234155e-07
            # print(np.max(diff))
            maxDiff = max(maxDiff, np.max(diff))

        smpl_pose = smpl_pose.numpy().ravel()

        openpose2d = pose3d['keypoint2d'][0][:25]  #25,3
        spin2d_skel24 = pose3d['keypoint2d'][0][25:]  #24,3

        #Save data
        imgnames_.append(fileName_saved)
        centers_.append(center)
        scales_.append(scale)
        has_smpl_.append(1)
        poses_.append(smpl_pose)  #(72,)
        shapes_.append(smpl_shape)  #(10,)

        openposes_.append(openpose2d)  #(25,3) OpenPose keypoints
        # print(openpose2d)
        parts_.append(spin2d_skel24)

        #3D joint
        S = np.zeros(
            [24, 4])  #blank for 3d. TODO: may need to add valid data for this
        skel3D_.append(S)

        #Debug 2D Visualize
        if False:
            img = cv2.imread(
                os.path.join('/run/media/hjoo/disk/data/coco', imgnames_[-1]))
            img = viewer2D.Vis_Skeleton_2D_smplCOCO(
                gt_skel, pt2d_visibility=gt_validity[:, 0], image=img)
            img = viewer2D.Vis_Bbox_minmaxPt(img, min_pt, max_pt)
            viewer2D.ImShow(img, waitTime=0)

        #Debug 3D Visualize smpl_coco
        if False:
            # data3D_coco_vis = np.reshape(data3D_coco, (data3D_coco.shape[0],-1)).transpose()   #(Dim, F)
            # data3D_coco_vis *=0.1   #mm to cm
            # glViewer.setSkeleton( [ data3D_coco_vis] ,jointType='smplcoco')
            # glViewer.show()

            #Debug 3D Visualize, h36m
            data3D_h36m_vis = np.reshape(
                data3D_h36m, (data3D_h36m.shape[0], -1)).transpose()  #(Dim, F)
            data3D_h36m_vis *= 100  #meter to cm

            # data3D_smpl24 = np.reshape(data3D_smpl24, (data3D_smpl24.shape[0],-1)).transpose()   #(Dim, F)
            # data3D_smpl24 *=0.1

            glViewer.setSkeleton([data3D_h36m_vis], jointType='smplcoco')
            glViewer.show()

        # keypoints

    # print("Final Img Num: {}, Final Sample Num: {}".format( len(set(imgnames_) , len(imgnames_)) ) )
    print("Final Sample Num: {}".format(len(imgnames_)))
    print("maxDiff in rot conv.: {}".format(maxDiff))
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, os.path.basename(eftDir) + '.npz')

    print(f"Save to {out_file}")
    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             openpose=openposes_,
             pose=poses_,
             shape=shapes_,
             has_smpl=has_smpl_,
             S=skel3D_)
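The torch.cat in the loop above pads each 3x3 rotation to the 3x4 form that tgm.rotation_matrix_to_angle_axis expects (the last column is an unused translation). A stand-alone illustration with identity rotations, assuming torchgeometry is installed:

import torch
import torchgeometry as tgm

rotmat = torch.eye(3).repeat(24, 1, 1)                     # (24,3,3) identity rotations
hom = torch.cat([rotmat, torch.zeros(24, 3, 1)], dim=-1)   # (24,3,4) with a zero translation column
aa = tgm.rotation_matrix_to_angle_axis(hom).view(-1, 72)   # (1,72) axis-angle pose vector
print(aa.abs().max())                                      # ~0 for identity rotations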
Code example #18
0
def visEFT_singleSubject(renderer):

    MAGNIFY_RATIO = 3           #onbbox only. To magnify the rendered image size 

    bStopForEachSample = args.waitforkeys      #if True, it will wait for any key pressed to move to the next sample
    bShowTurnTable = args.turntable

    inputData = args.fit_data
    imgDir = args.img_dir

    #Load SMPL model
    smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
    smpl = SMPL_19(smplModelPath, batch_size=1, create_transl=False)
    
    #Load EFT fitting data
    print(f"Loading EFT data from {inputData}")
    if os.path.exists(inputData):
        with open(inputData,'r') as f:
            eft_data = json.load(f)
            print("EFT data: ver {}".format(eft_data['ver']))
            eft_data_all = eft_data['data']
    else:
        print(f"ERROR:: Cannot find EFT data: {inputData}")
        assert False


    #Visualize each EFT Fitting output
    for idx, eft_data in enumerate(tqdm(eft_data_all)):
        
        #Get raw image path
        imgFullPath = eft_data['imageName']
        # imgName = os.path.basename(imgFullPath)
        imgName = imgFullPath
        imgFullPath =os.path.join(imgDir, imgName)
        if os.path.exists(imgFullPath) ==False:
            print(f"Img path is not valid: {imgFullPath}")
            assert False
        rawImg = cv2.imread(imgFullPath)
        print(f'Input image: {imgFullPath}')

        #EFT data
        bbox_scale = eft_data['bbox_scale']
        bbox_center = eft_data['bbox_center']

        pred_camera = np.array(eft_data['parm_cam'])
        pred_betas = np.reshape(np.array( eft_data['parm_shape'], dtype=np.float32), (1,10) )     #(10,)
        pred_betas = torch.from_numpy(pred_betas)

        pred_pose_rotmat = np.reshape( np.array( eft_data['parm_pose'], dtype=np.float32), (1,24,3,3)  )        #(24,3,3)
        pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)

        keypoint_2d_validity = eft_data['joint_validity_openpose18']

        #COCO only. Annotation index
        if 'annotId' in eft_data.keys():
            print("COCO annotId: {}".format(eft_data['annotId']))


        #Get SMPL mesh and joints from SMPL parameters
        smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,[0]], pose2rot=False)
        smpl_vertices = smpl_output.vertices.detach().cpu().numpy()[0]
        smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()[0]

        #Crop image using cropping information
        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )


        if MAGNIFY_RATIO>1:
            croppedImg = cv2.resize(croppedImg, (croppedImg.shape[1]*MAGNIFY_RATIO, croppedImg.shape[0]*MAGNIFY_RATIO) )

        ########################
        # Visualization
        ########################

        # Visualize 2D image
        if True:
            viewer2D.ImShow(rawImg, name='rawImg', waitTime=1)      #shown briefly (1ms); no key press needed
            viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

            #Convert bbox_center, bbox_scale --> bbox_xyxy
            bbox_xyxy = conv_bboxinfo_bboxXYXY(bbox_scale,bbox_center)
            img_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg.copy(),bbox_xyxy[:2], bbox_xyxy[2:])
            viewer2D.ImShow(img_bbox, name='img_bbox', waitTime=1)

        # Visualization Mesh
        if True:    
            camParam_scale = pred_camera[0]
            camParam_trans = pred_camera[1:]
            pred_vert_vis = smpl_vertices
            smpl_joints_3d_vis = smpl_joints_3d

            if args.onbbox:
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)
                renderer.setBackgroundTexture(croppedImg)
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])

                pred_vert_vis *=MAGNIFY_RATIO
            else:
                #Covert SMPL to BBox first
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 
                smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                renderer.setBackgroundTexture(rawImg)
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

                #In orthographic model. XY of 3D is just 2D projection
                smpl_joints_2d_vis = conv_3djoint_2djoint(smpl_joints_3d_vis,rawImg.shape )
                # image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_smpl45(smpl_joints_2d_vis, image=rawImg.copy(),color=(0,255,255))
                image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(smpl_joints_2d_vis, image=rawImg.copy(),color=(255,0,0))        #All 2D joint
                image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(smpl_joints_2d_vis, pt2d_visibility=keypoint_2d_validity, image=image_2dkeypoint_pred,color=(0,255,255))        #Only valid
                viewer2D.ImShow(image_2dkeypoint_pred, name='keypoint_2d_pred', waitTime=1)

            pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
            v = pred_meshes['ver'] 
            f = pred_meshes['f']

            #Visualize in the original image space
            renderer.set_mesh(v,f)
            renderer.showBackground(True)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("cam")

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
                
            renderer.display()
            renderImg = renderer.get_screen_color_ibgr()
            viewer2D.ImShow(renderImg,waitTime=1)
        
        # Visualize multi-level cropped bbox
        if args.multi_bbox:
            from demo.multi_bbox_gen import multilvel_bbox_crop_gen
            
            bbox_list = multilvel_bbox_crop_gen(rawImg, pred_vert_vis, bbox_center, bbox_scale)

            #Visualize BBox
            for b_idx, b in enumerate(bbox_list):
                # bbox_xyxy= conv_bboxinfo_centerscale_to_bboxXYXY(b['center'], b['scale'])
                bbox_xyxy= b['bbox_xyxy']
                if b_idx==0:
                    img_multi_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg,  bbox_xyxy[:2], bbox_xyxy[2:] ,color=(0,255,0))
                else:
                    img_multi_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg,  bbox_xyxy[:2], bbox_xyxy[2:] ,color=(0,255,255))
            viewer2D.ImShow(img_multi_bbox, name='multi_bbox', waitTime=1)
            # for bbox in bbox_list:


        # Visualization Mesh on side view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            # renderer.setCameraViewMode("side")    #To show the object in side vie
            renderer.setCameraViewMode("free")     
            renderer.setViewAngle(90,20)

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()        #Overwrite on rawImg
            viewer2D.ImShow(sideImg,waitTime=1)
            
            sideImg = cv2.resize(sideImg, (renderImg.shape[1], renderImg.shape[0]) )
            # renderImg = cv2.resize(renderImg, (sideImg.shape[1], sideImg.shape[0]) )
        
        # Visualization Mesh on side view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            # renderer.setCameraViewMode("side")    #To show the object in side vie
            renderer.setCameraViewMode("free")     
            renderer.setViewAngle(-60,50)

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            sideImg_2 = renderer.get_screen_color_ibgr()        #Overwrite on rawImg
            viewer2D.ImShow(sideImg_2,waitTime=1)
            
            sideImg_2 = cv2.resize(sideImg_2, (renderImg.shape[1], renderImg.shape[0]) )
            # renderImg = cv2.resize(renderImg, (sideImg.shape[1], sideImg.shape[0]) )


        #Visualize camera view and side view
        saveImg = np.concatenate( (renderImg,sideImg), axis =1)
        # saveImg = np.concatenate( (croppedImg, renderImg,sideImg, sideImg_2), axis =1)

        if bStopForEachSample:
            viewer2D.ImShow(saveImg,waitTime=0) #waitTime=0 means that it will wait for any key pressed
        else:
            viewer2D.ImShow(saveImg,waitTime=1)
        
        #Render Mesh on the rotating view
        if bShowTurnTable:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("free")
            for i in range(90):
                renderer.setViewAngle(i*4,0)
                renderer.display()
                sideImg = renderer.get_screen_color_ibgr()        #Overwrite on rawImg
                viewer2D.ImShow(sideImg,waitTime=1,name="turn_table")

                if False:       #If you want to save this into files
                    render_output_path = args.render_dir + '/turntable_{}_{:08d}.jpg'.format(os.path.basename(imgName),i)
                    cv2.imwrite(render_output_path, sideImg)

        #Save the rendered image to files
        if True:    
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{}_eft{:08d}.jpg'.format(imgName[:-4].replace('/','_'),idx)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, saveImg)
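For reference, a minimal sketch of the per-entry fields visEFT_singleSubject reads from the EFT fitting json; only the keys accessed above are listed, and the file name is a hypothetical placeholder.

import json

with open('/path/to/eft_fit/eft_fit_coco.json', 'r') as f:   # hypothetical file name
    eft = json.load(f)

print(eft['ver'], len(eft['data']))
entry = eft['data'][0]
for key in ('imageName', 'bbox_scale', 'bbox_center',
            'parm_cam', 'parm_shape', 'parm_pose',
            'joint_validity_openpose18'):
    print(key, type(entry[key]))
# 'annotId' is present only for COCO-derived entries, as checked above.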
Code example #19
0
def RunMonomocap(args, video_path, visualizer, bboxdetector, bodymocap, device, renderOutRoot):

    #Set up output folders
    if renderOutRoot:
        outputFileName = 'scene_%08d.jpg' # Hardcoded in glViewer.py
        if os.path.exists(renderOutRoot)==False:
            os.mkdir(renderOutRoot)

        overlaidImageFolder= os.path.join(renderOutRoot, 'overlaid')
        if os.path.exists(overlaidImageFolder)==False:
            os.mkdir(overlaidImageFolder)

        sideImageFolder= os.path.join(renderOutRoot, 'side')
        if os.path.exists(sideImageFolder)==False:
            os.mkdir(sideImageFolder)

        mergedImageFolder= os.path.join(renderOutRoot, 'merged')
        if os.path.exists(mergedImageFolder)==False:
            os.mkdir(mergedImageFolder)

        g_renderDir= os.path.join(renderOutRoot, 'render')
        if os.path.exists(g_renderDir)==False:
            os.mkdir(g_renderDir)

    #Set up input data (images or webcam)
    imageList =[]
    loaded_bboxList =None 
    cap =None
    if os.path.isdir(video_path):       #if video_path is a dir, load all images (or bbox json files) in it


        imageList = sorted(os.listdir(video_path))

        if len(imageList)>0  and imageList[0][-4:] =='json':        #Handling bbox dir input
            print("Found that this input folder has bboxes.")
            bboxFiles = imageList
            imageList=[]
            loaded_bboxList =[]
            for bn in bboxFiles:
                bf = os.path.join(video_path, bn)
                with open(bf,'r') as f:
                    bbox = json.load(f)
                    assert  'imgPath' in bbox and 'bboxes_xywh' in bbox
                    imageList.append(bbox['imgPath'])

                    bboxes_np = [ np.array(d) for d in bbox['bboxes_xywh']]
                    loaded_bboxList.append(bboxes_np)

        else:       #Otherwise, image dir
            imageList = [os.path.join(video_path,f) for f in imageList]
    else:
        cap = cv2.VideoCapture(video_path)
        if os.path.exists(video_path):
            print("valid")
        if cap.isOpened()==False:
            print(f"Failed in opening video: {video_path}")
            assert False

    now = datetime.now()
    seqName = now.strftime("%d_%m_%Y_%H%M%S")
    print(f"seqName: {seqName}")
    cur_frame = args.startFrame -1
    while(True):
        # print("Start Mocap")
        g_timer.tic()    

        cur_frame += 1        #starting from 0
        meshList =[]
        skelList =[]

        if len(imageList)>0:        #If the path is a folder
            if len(imageList)<=cur_frame:
                break
            elif args.endFrame>=0 and cur_frame > args.endFrame:
                break
            else:
                fName = imageList[cur_frame]
                img_original_bgr  = cv2.imread(fName)
        else:       #cap is None
            _, img_original_bgr = cap.read()
            fName = 'scene_{:08d}.pkl'.format(cur_frame)    

            if img_original_bgr is None: # Restart video at the end
                print("Warninig: img_original_bgr ==  None")
                # cap = cv2.VideoCapture(video_path)
                # ret, camInputFrame = cap.read()
                break   #Stop processing at the end of video

            if cap.isOpened()==False:
                print(">> Error: Input data is not valid or unavailable.")
                if args.url is not None:
                    print(">> Error: There would be version issues of your OpenCV in handling URL as the input stream")
                    print(">> Suggestion 1: Try to download the video via youtube-dl and put the video path as input")
                    print(">> Suggestion 2: Use --download or --d flag to automatically download and process it")
                    print("")
                assert False

        # Our operations on the frame come here
        # if cap is not None:  #If input from VideoCapture
        # img_original_rgb = cv2.cvtColor(img_original_bgr, cv2.COLOR_BGR2RGB)          #Our model is trained with RGB
        # Display the resulting frame
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        #Check existence of already processed data
        if args.skip and renderOutRoot:
            # viewer2D.ImShow(overlaidImg)
            mergedImgFileName = '{0}/{1}'.format(mergedImageFolder,outputFileName%cur_frame)
            if os.path.exists(mergedImgFileName):
                print(f"Already exists: {mergedImgFileName}")
                continue


        ######################################################
        ## BBox detection

        if loaded_bboxList is not None and len(loaded_bboxList)==len(imageList):
            bboxXYWH_list = loaded_bboxList[cur_frame]
        else:
            bboxXYWH_list = bboxdetector.detectBbox(img_original_bgr)

        if args.bboxout:
            # bboxXYWH_list
            if renderOutRoot is None:
                    print("Please set output folder by --out")
                    assert False
            else:
                bboxOutFolder = os.path.join(renderOutRoot,'bbox')
                if not os.path.exists(bboxOutFolder):
                    os.mkdir(bboxOutFolder)

                outputFileName_json = os.path.join(bboxOutFolder, os.path.basename(fName)[:-4]+'.json')
                bboxXYWH_list_saved = [ [int(dd) for dd in d] for d in bboxXYWH_list ]   #plain Python ints for json
                with open(outputFileName_json,'w') as fout:
                    json.dump( {'imgPath': fName, 'bboxes_xywh':bboxXYWH_list_saved}, fout)
                


        #Sort the bbox using bbox size (to make the order as consistent as possible without tracking)
        diaSize =  [ (x[2]**2 + x[3]**2) for x in bboxXYWH_list]

        idx_big2small = np.argsort(diaSize)[::-1]
        bboxXYWH_list = [ bboxXYWH_list[i] for i in idx_big2small ] #sorted, big2small

        if args.single and len(bboxXYWH_list)>1:
            bboxXYWH_list = [ bboxXYWH_list[0] ]        #nparray (1,4)

            #Chose the biggest one
            # diaSize =  [ (x[2]**2 + x[3]**2) for x in bboxXYWH_list]
            # bigIdx = np.argmax(diaSize)
            # bboxXYWH_list = [bboxXYWH_list[bigIdx]]

        g_debug_bboxonly= False
        if g_debug_bboxonly:
            if False:#len(bboxXYWH_list)>0:
                for bbr in bboxXYWH_list:
                    img_original_bgr = viewer2D.Vis_Bbox(img_original_bgr, bbr)
                    viewer2D.ImShow(img_original_bgr)
            g_timer.toc(average =True, bPrint=True,title="DetectionTime")


            # Capture raw videos (to make a sample data)
            viewer2D.ImShow(img_original_bgr)
            mergedImgFileName = '{0}/{1}'.format(mergedImageFolder,outputFileName%cur_frame)
            cv2.imwrite(mergedImgFileName, img_original_bgr)

            continue
        # g_timer.toc(average =True, bPrint=True,title="Detect")
       
        ######################################################
        ## Body Pose Regression

        if len(bboxXYWH_list)>0:

            mocap_out =[]
            # Old format
            # pred_rotmat_all =[]
            # pred_betas_all =[]
            # pred_camera_all =[]
            # pred_vertices_all =[]
            # pred_joints_3d_all =[]
            # bbox_all =[]
            # boxScale_o2n_all =[]
            # bboxTopLeft_all =[]

            for i, bboxXYHW in enumerate(bboxXYWH_list):

                subjectId = seqName + '_{:03d}'.format(i)       #Without tracking, this value is not consistent

                predoutput = bodymocap.regress(img_original_bgr, bboxXYHW)
                if predoutput is None:
                    continue
                pred_vertices_img = predoutput['pred_vertices_img']
                pred_joints_img = predoutput['pred_joints_img']
                
                    
                tempMesh = {'ver': pred_vertices_img, 'f':  bodymocap.smpl.faces}
                meshList.append(tempMesh)
                skelList.append(pred_joints_img.ravel()[:,np.newaxis])  #(49x3, 1)

                if args.pklout:

                    mocap_single = {
                            'parm_pose': predoutput['pred_rotmat'][0],           #(24,3, 3)
                            'parm_shape': predoutput['pred_betas'][0],             #(10,)
                            'parm_cam': predoutput['pred_camera'],           #[cam_scale, cam_offset_x,, cam_offset_y ]
                            'subjectId': subjectId,       
                            'pred_vertices_imgspace': predoutput['pred_vertices_img'],  #3D SMPL vertices where X,Y are aligned to images
                            'pred_joints_imgspace': predoutput['pred_joints_img'],      #3D joints where X,Y are aligned to images
                            'bbox_xyxy': predoutput['bbox_xyxy'],        #[minX,minY,maxX,maxY]
                            'bbox_topLeft': predoutput['bboxTopLeft'],   #(2,)       #auxiliary data used inside visualization
                            'bbox_scale_o2n': predoutput['boxScale_o2n'],      #scalar #auxiliary data used inside visualization
                            'smpltype': 'smpl',
                            'annotId': -1,
                            'imageName': fName

                            #Old format below
                            # pred_betas_all.append(predoutput['pred_betas'])
                            # pred_camera_all.append(predoutput['pred_camera'])
                            # pred_vertices_all.append(pred_vertices_img)
                            # pred_joints_3d_all.append(pred_joints_img)
                            # bbox_all.append(predoutput['bbox_xyxy'])
                            # bboxTopLeft_all.append(predoutput['bboxTopLeft'])
                            # boxScale_o2n_all.append(predoutput['boxScale_o2n'])
                        }
                    mocap_out.append(mocap_single)

                    #Old format below
                    # pred_rotmat_all.append(predoutput['pred_rotmat'])
                    # pred_betas_all.append(predoutput['pred_betas'])
                    # pred_camera_all.append(predoutput['pred_camera'])
                    # pred_vertices_all.append(pred_vertices_img)
                    # pred_joints_3d_all.append(pred_joints_img)
                    # bbox_all.append(predoutput['bbox_xyxy'])
                    # bboxTopLeft_all.append(predoutput['bboxTopLeft'])
                    # boxScale_o2n_all.append(predoutput['boxScale_o2n'])
        
            ######################################################
            ## Export to pkl
            if args.pklout and len(mocap_out)>0:

                # Old format below
                # pred_rotmat_all = np.concatenate(pred_rotmat_all,axis=0)
                # pred_betas_all = np.concatenate(pred_betas_all,axis=0)
                # pred_camera_all = np.concatenate(pred_camera_all,axis=0)
                # pred_vertices_all = np.concatenate(pred_vertices_all,axis=0)
                # pred_joints_3d_all = np.concatenate(pred_joints_3d_all,axis=0)
                # # bbox_all = np.concatenate(bbox_all)
                # # bboxTopLeft_all = np.concatenate(bboxTopLeft_all)
                # # boxScale_o2n_all =np.concatenate(boxScale_o2n_all)
                # dataOut = {
                #     'pred_rotmat_all': pred_rotmat_all,
                #     'pred_betas_all': pred_betas_all,
                #     # 'cams_person': pred_camera_all,
                #     'pred_camera_all': pred_camera_all,
                #     'pred_joints_3d_all': pred_joints_3d_all,
                #     # 'verts_person_og':pred_vertices_all,
                #     'pred_vertices_all':pred_vertices_all,
                #     'boxScale_o2n_all': boxScale_o2n_all,
                #     'bboxTopLeft_all': bboxTopLeft_all,
                #     'bbox':bbox_all
                # }
                if renderOutRoot is None:
                    print("Please set output folder by --out")
                    assert False
                    
                else:
                    mocapOutFolder = os.path.join(renderOutRoot,'mocap')
                    if not os.path.exists(mocapOutFolder):
                        os.mkdir(mocapOutFolder)

                    outputFileName_pkl = os.path.join(mocapOutFolder,os.path.basename(fName)[:-4]+'.pkl')
                    fout = open(outputFileName_pkl,'wb')
                    pickle.dump(mocap_out, fout)
                    fout.close()
        
        # g_timer.toc(average =True, bPrint=True,title="Detect+Regress")
        ######################################################
        ## Visualization

        if args.noVis == False:        #Visualize
            # img_original  = img_original_bgr[:,:,[2,1,0]]
            # img_original = np.ascontiguousarray(img_original, dtype=np.uint8)
            assert img_original_bgr.shape[0]>0 and img_original_bgr.shape[1]>0

            #Render output to files            
            if renderOutRoot:
                visualizer.visualize_screenless_naive(meshList, skelList, bboxXYWH_list, img_original_bgr)

                overlaidImg = visualizer.renderout['render_camview']
                overlaidImgFileName = '{0}/{1}'.format(overlaidImageFolder,outputFileName%cur_frame)
                cv2.imwrite(overlaidImgFileName, overlaidImg)

                sideImg = visualizer.renderout['render_sideview']
                sideImgFileName = '{0}/{1}'.format(sideImageFolder,outputFileName%cur_frame)
                cv2.imwrite(sideImgFileName, sideImg)

                if True:    #merged view rendering
                    # overlaidImg_resized = cv2.resize(overlaidImg, (img_original_bgr.shape[1], img_original_bgr.shape[0]))
                    img_original_bgr_resized = cv2.resize(img_original_bgr, (overlaidImg.shape[1], overlaidImg.shape[0]))
                    sideImg_resized = cv2.resize(sideImg, (overlaidImg.shape[1], overlaidImg.shape[0]))
                    mergedImg = np.concatenate( (img_original_bgr_resized, overlaidImg, sideImg_resized), axis=1)
                    viewer2D.ImShow(mergedImg,name="merged")

                    # viewer2D.ImShow(overlaidImg)
                    mergedImgFileName = '{0}/{1}'.format(mergedImageFolder,outputFileName%cur_frame)
                    cv2.imwrite(mergedImgFileName, mergedImg)
                    print(f"Saved to {mergedImgFileName}")

            #Do not save files but just GUI visualization
            else:
                visualizer.visualize_gui_naive(meshList, skelList, bboxXYWH_list, img_original_bgr)
        g_timer.toc(average =True, bPrint=True,title="Detect+Regress+Vis")

    # When everything is done, release the capture
    if cap is not None:
        cap.release()
    cv2.destroyAllWindows()

    # Video generation from rendered images
    if args.noVis == False and args.noVideoOut==False:
        if renderOutRoot and os.path.exists( os.path.join(renderOutRoot, 'merged') ): 
            print(">> Generating video in {}/{}.mp4".format(renderOutRoot,os.path.basename(renderOutRoot) ))
            inputFrameDir = os.path.join(renderOutRoot, 'merged')
            outVideo_fileName = os.path.join(renderOutRoot, os.path.basename(renderOutRoot)+'.mp4')
            ffmpeg_cmd = 'ffmpeg -y -f image2 -framerate 25 -pattern_type glob -i "{0}/*.jpg"  -pix_fmt yuv420p -c:v libx264 -x264opts keyint=25:min-keyint=25:scenecut=-1 -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" {1}'.format(inputFrameDir, outVideo_fileName)
            os.system(ffmpeg_cmd)
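
The mocap pkl written above is simply a pickled list of the mocap_single dictionaries, one entry per detected person in the frame. A minimal reading sketch, assuming a hypothetical file under renderOutRoot/mocap (only the dictionary keys are taken from the code above):

import pickle

# hypothetical path; the real name is <renderOutRoot>/mocap/<frame name>.pkl as built above
with open('render_out/mocap/frame_000000.pkl', 'rb') as fin:
    mocap_out = pickle.load(fin)                               # list with one dict per detected person

for person in mocap_out:
    print(person['subjectId'], person['bbox_xyxy'])            # bbox as [minX, minY, maxX, maxY]
    verts = person['pred_vertices_imgspace']                   # SMPL vertices, X/Y aligned to the image
    cam_scale, cam_tx, cam_ty = person['parm_cam']             # weak-perspective camera
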
Code example #20
0
def h36m_train_extract(dataset_path, out_path, extract_img=False):

    # convert joints to global order
    h36m_idx = [11, 6, 7, 8, 1, 2, 3, 12, 24, 14, 15, 17, 18, 19, 25, 26, 27]
    global_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]

    # structs we use
    imgnames_, scales_, centers_, parts_, Ss_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []

    # users in the training set (S1, S5, S6, S7, S8)
    user_list = [1, 5, 6, 7, 8]

    # go over each user
    for user_i in user_list:
        user_name = 'S%d' % user_i

        # bbox_path = os.path.join(dataset_path, user_name, 'MySegmentsMat', 'ground_truth_bb')
        # path with GT 3D pose
        pose_path = os.path.join(dataset_path, user_name,
                                 'Poses_D3_Positions_mono')
        # path with GT 2D pose
        pose2d_path = os.path.join(dataset_path, user_name,
                                   'Poses_D2_Positions')
        # path with videos (needed when extract_img=True)
        vid_path = os.path.join(dataset_path, user_name, 'Videos')

        # go over all the sequences of each user
        seq_list = glob.glob(os.path.join(pose_path, '*.cdf'))
        seq_list.sort()
        for seq_i in seq_list:

            print('processing: {}'.format(seq_i))
            # sequence info
            seq_name = seq_i.split('/')[-1]
            action, camera, _ = seq_name.split('.')
            action = action.replace(' ', '_')
            # irrelevant sequences
            if action == '_ALL':
                continue

            # 3D pose file
            poses_3d = pycdf.CDF(seq_i)['Pose'][0]  #Nx96

            #load 2D pose file
            seq_i_2D_pose = os.path.join(pose2d_path, os.path.basename(seq_i))
            poses_2d = pycdf.CDF(seq_i_2D_pose)['Pose'][0]  #Nx64
            poses_2d = np.reshape(poses_2d, (-1, 32, 2))

            # # bbox file
            # bbox_file = os.path.join(bbox_path, seq_name.replace('cdf', 'mat'))
            # bbox_h5py = h5py.File(bbox_file)

            # video file
            if extract_img:
                vid_file = os.path.join(vid_path,
                                        seq_name.replace('cdf', 'mp4'))
                imgs_path = os.path.join(dataset_path, 'images')
                vidcap = cv2.VideoCapture(vid_file)
                success, image = vidcap.read()

            # go over each frame of the sequence
            for frame_i in range(poses_3d.shape[0]):
                # read video frame
                if extract_img:
                    success, image = vidcap.read()
                    if not success:
                        break

                # check if you can keep this frame
                if frame_i % 5 == 0:
                    # image name
                    imgname = '%s_%s.%s_%06d.jpg' % (user_name, action, camera,
                                                     frame_i + 1)

                    # save the extracted video frame to disk
                    if extract_img:
                        if not os.path.isdir(imgs_path):
                            os.makedirs(imgs_path)
                        cv2.imwrite(os.path.join(imgs_path, imgname), image)

                    # read GT 3D pose
                    Sall = np.reshape(poses_3d[frame_i, :],
                                      [-1, 3]) / 1000.  #[32,3]
                    S17 = Sall[h36m_idx]
                    S17 -= S17[0]  # root-centered
                    S24 = np.zeros([24, 4])
                    S24[global_idx, :3] = S17
                    S24[global_idx, 3] = 1

                    # # read GT bounding box
                    # mask = bbox_h5py[bbox_h5py['Masks'][frame_i,0]].value.T
                    # ys, xs = np.where(mask==1)
                    # bbox = np.array([np.min(xs), np.min(ys), np.max(xs)+1, np.max(ys)+1])

                    curPose_2d = poses_2d[frame_i, :]
                    min_pt = np.min(curPose_2d, axis=0)
                    max_pt = np.max(curPose_2d, axis=0)
                    bbox = [min_pt[0], min_pt[1], max_pt[0], max_pt[1]]

                    #Skeleton
                    if False:
                        from renderer import glViewer
                        from renderer import viewer2D
                        image = viewer2D.Vis_Bbox_minmaxPt(
                            image, min_pt, max_pt)
                        viewer2D.ImShow(image, waitTime=1)

                        S17s_ = np.array(S17) * 100
                        skelVis = S17s_.ravel()[:, np.newaxis]

                        glViewer.setSkeleton([skelVis])
                        glViewer.show()
                        continue

                    center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]
                    scale = 0.9 * max(bbox[2] - bbox[0],
                                      bbox[3] - bbox[1]) / 200.

                    # store data
                    imgnames_.append(os.path.join('images', imgname))
                    centers_.append(center)
                    scales_.append(scale)
                    Ss_.append(S24)

    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, 'h36m_train.npz')
    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             S=Ss_)
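
The (center, scale) pair stored above follows the common 200-pixel crop convention: center is the bbox midpoint and scale is 0.9 times the longer box side divided by 200. A tiny worked example with made-up numbers:

# bbox from the 2D joints: [minX, minY, maxX, maxY]
bbox = [100., 50., 300., 450.]
center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]      # -> [200.0, 250.0]
scale = 0.9 * max(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 200.   # -> 0.9 * 400 / 200 = 1.8
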
Code example #21
0
    def _visualize_screenless_naive(self,
                                    meshList,
                                    skelList=None,
                                    body_bbox_list=None,
                                    img_original=None,
                                    vis=False,
                                    maxHeight=1080):
        """
            args:
                meshList: list of {'ver': pred_vertices, 'f': smpl.faces}
                skelList: list of [JointNum*3, 1]       (where 1 means num. of frames in glviewer)
                bbr_list: list of [x,y,w,h] 
            output:
                #Rendered images are saved in 
                self.renderout['render_camview']
                self.renderout['render_sideview']

            #Note: The size of opengl rendering is restricted by the current screen size. Set the maxHeight accordingly

        """
        assert self.renderer is not None

        if len(meshList) == 0:
            # sideImg = cv2.resize(sideImg, (renderImg.shape[1], renderImg.shape[0]) )
            self.renderout = {}
            self.renderout['render_camview'] = img_original.copy()

            blank = np.ones(img_original.shape,
                            dtype=np.uint8) * 255  #generate blank image
            self.renderout['render_sideview'] = blank

            return

        if body_bbox_list is not None:
            for bbr in body_bbox_list:
                viewer2D.Vis_Bbox(img_original, bbr)
        # viewer2D.ImShow(img_original)

        #Check image height
        imgHeight, imgWidth = img_original.shape[0], img_original.shape[1]
        if maxHeight < imgHeight:  #Resize
            ratio = maxHeight / imgHeight

            #Resize Img
            newWidth = int(imgWidth * ratio)
            newHeight = int(imgHeight * ratio)
            img_original_resized = cv2.resize(img_original,
                                              (newWidth, newHeight))

            #Resize mesh and skeleton data accordingly
            for m in meshList:
                m['ver'] *= ratio

            for s in skelList:
                s *= ratio

        else:
            img_original_resized = img_original

        self.renderer.setWindowSize(img_original_resized.shape[1],
                                    img_original_resized.shape[0])
        self.renderer.setBackgroundTexture(img_original_resized)
        self.renderer.setViewportSize(img_original_resized.shape[1],
                                      img_original_resized.shape[0])

        # self.renderer.add_mesh(meshList[0]['ver'],meshList[0]['f'])
        self.renderer.clear_mesh()
        for mesh in meshList:
            self.renderer.add_mesh(mesh['ver'], mesh['f'])
        self.renderer.showBackground(True)
        self.renderer.setWorldCenterBySceneCenter()
        self.renderer.setCameraViewMode("cam")
        # self.renderer.setViewportSize(img_original_resized.shape[1], img_original_resized.shape[0])

        self.renderer.display()
        renderImg = self.renderer.get_screen_color_ibgr()

        if vis:
            viewer2D.ImShow(renderImg, waitTime=1, name="rendered")

        ###Render Side View
        self.renderer.setCameraViewMode("free")
        self.renderer.setViewAngle(90, 20)
        self.renderer.showBackground(False)
        self.renderer.setViewportSize(img_original_resized.shape[1],
                                      img_original_resized.shape[0])
        self.renderer.display()
        sideImg = self.renderer.get_screen_color_ibgr()  #Overwrite on rawImg

        if vis:
            viewer2D.ImShow(sideImg, waitTime=0, name="sideview")

        # sideImg = cv2.resize(sideImg, (renderImg.shape[1], renderImg.shape[0]) )
        self.renderout = {}
        self.renderout['render_camview'] = renderImg
        self.renderout['render_sideview'] = sideImg
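
A hedged usage sketch of the method above, mirroring the screenless rendering path of the demo code earlier (which calls it as visualize_screenless_naive on a visualizer instance); the visualizer object, mesh/skeleton lists and output file names are assumed:

import cv2

visualizer.visualize_screenless_naive(meshList, skelList, bboxXYWH_list, img_original_bgr)
overlaidImg = visualizer.renderout['render_camview']     # mesh rendered over the input image
sideImg = visualizer.renderout['render_sideview']        # mesh rendered from a free side view
cv2.imwrite('render_camview.jpg', overlaidImg)           # hypothetical output file names
cv2.imwrite('render_sideview.jpg', sideImg)
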
Code example #22
0
def visEFT_multiSubjects(renderer):
    inputDir = args.fit_dir
    imgDir = args.img_dir

    smplModelDir = args.smpl_dir
    smpl = SMPL(smplModelDir, batch_size=1, create_transl=False)
    
    eft_fileList  = listdir(inputDir)       #Check all fitting files
    print(">> Found {} files in the fitting folder {}".format(len(eft_fileList), inputDir))

    #Aggregate all EFT fits per image
    eft_perimage ={}
    for f in sorted(eft_fileList):
        #Load
        imageName = f[:f.rfind('_')]
        if imageName not in eft_perimage.keys():
            eft_perimage[imageName] =[]

        eft_perimage[imageName].append(f)


    for imgName in eft_perimage:
        eftFiles_perimage = eft_perimage[imgName]
        
        renderer.clear_mesh()

        for idx,f in enumerate(eftFiles_perimage):
            
            #Load EFT data
            fileFullPath = join(inputDir, f)
            with open(fileFullPath,'rb') as f:
                eft_data = pickle.load(f)

            #Get raw image path
            if idx==0:
                imgFullPath = eft_data['imageName'][0]
                imgFullPath =os.path.join(imgDir, os.path.basename(imgFullPath) )
                assert os.path.exists(imgFullPath)
                rawImg = cv2.imread(imgFullPath)
                print(f'Input image: {imgFullPath}')

            #EFT data
            bbox_scale = eft_data['scale'][0]
            bbox_center = eft_data['center'][0]

            pred_camera = eft_data['pred_camera']
            pred_betas = torch.from_numpy(eft_data['pred_shape'])
            pred_pose_rotmat = torch.from_numpy(eft_data['pred_pose_rotmat'])        

            #COCO only. Annotation index
            print("COCO annotId: {}".format(eft_data['annotId']))

            #Obtain skeleton and smpl data
            smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
            smpl_vertices = smpl_output.vertices.detach().cpu().numpy() 
            smpl_joints_3d = smpl_output.joints.detach().cpu().numpy() 

            #Crop image
            croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg.copy(), bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

            ########################
            # Visualize
            # Visualize 2D image
            if False:
                viewer2D.ImShow(rawImg, name='rawImg', waitTime=1)      #You should press any key 
                viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=0)

            # Visualization Mesh on raw images
            if True:    
                b=0
                camParam_scale = pred_camera[b,0]
                camParam_trans = pred_camera[b,1:]
                pred_vert_vis = smpl_vertices[b]
                smpl_joints_3d_vis = smpl_joints_3d[b]

                if False:   #args.onbbox: disabled here, so rendering always happens in the original image space
                    pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                    smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)
                    renderer.setBackgroundTexture(croppedImg)
                    renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
                else:
                    #Covert SMPL to BBox first
                    pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                    smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)

                    #From cropped space to original
                    pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 
                    smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                    renderer.setBackgroundTexture(rawImg)
                    renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                v = pred_meshes['ver'] 
                f = pred_meshes['f']

                #Visualize in the original image space
                # renderer.set_mesh(v,f)
                renderer.add_mesh(v,f)

        #Render Mesh on the camera view
        renderer.showBackground(True)
        renderer.setWorldCenterBySceneCenter()
        renderer.setCameraViewMode("cam")
        renderer.display()
        overlaid = renderer.get_screen_color_ibgr()        #Overwrite on rawImg
        viewer2D.ImShow(overlaid,waitTime=1,name="overlaid")

        #Render Mesh on the rotating view
        renderer.showBackground(False)
        renderer.setWorldCenterBySceneCenter()
        renderer.setCameraViewMode("free")
        for i in range(90):
            renderer.setViewAngle(i*4,0)
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()        #Overwrite on rawImg
            viewer2D.ImShow(sideImg,waitTime=1,name="otherviews")
            
        if True:    #Save the rendered image to files
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{}.jpg'.format(imgName)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, overlaid)   #save the camera-view rendering (rawImg itself is not modified by the renderer)
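
Across these examples the SMPL output is mapped to pixels in two steps: convert_smpl_to_bbox applies the weak-perspective camera to land in the cropped-bbox space, and convert_bbox_to_oriIm undoes the crop/resize (using boxScale_o2n and bboxTopLeft) to land in original-image pixels. The helpers themselves are not shown here; a rough sketch of the first step, inferred from the visualization code in the trainer example further below, not the actual library implementation:

import numpy as np

def smpl_to_bbox_sketch(points, cam_scale, cam_trans, bbox_res=224):
    points = points.copy() * cam_scale      # weak-perspective scale
    points[:, 0] += cam_trans[0]            # x offset in normalized crop coordinates
    points[:, 1] += cam_trans[1]            # y offset
    return points * (bbox_res / 2.0)        # to crop pixel units (bbox_res/2 = 112 for a 224 crop)
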
Code example #23
0
File: visEFTFit_gui.py  Project: arturojreal/eft
def visEFT_singleSubject(inputDir, imDir, smplModelDir, bUseSMPLX):
    if bUseSMPLX:
        smpl = SMPLX(smplModelDir, batch_size=1, create_transl=False)
    else:
        smpl = SMPL(smplModelDir, batch_size=1, create_transl=False)
    fileList  = listdir(inputDir)       #Check all fitting files

    print(">> Found {} files in the fitting folder {}".format(len(fileList), inputDir))
    totalCnt =0
    erroneousCnt =0
    # fileList =['00_00_00008422_0.pkl', '00_00_00008422_1731.pkl', '00_00_00008422_3462.pkl']     #debug
    for f in sorted(fileList):
        
        #Load
        fileFullPath = join(inputDir, f)
        with open(fileFullPath,'rb') as f:
            dataDict = pickle.load(f)
        print(f"Loaded :{fileFullPath}")
        if 'imageName' in dataDict.keys():  #If this pkl has only one instance. Made this to handle panoptic output where a pkl has multiple instances
            dataDict = {0:dataDict}

        for jj, k in enumerate(dataDict):
            if jj%50 !=0:
                continue
            data = dataDict[k]
            # print(data['subjectId'])
            # continue
            if 'smpltype' in data:
                if (data['smpltype'] =='smpl' and bUseSMPLX) or (data['smpltype'] =='smplx' and bUseSMPLX==False):
                    print("SMPL type mismatch error")
                    assert False

            imgFullPathOri = data['imageName'][0]
            imgFullPath = os.path.join(imDir, os.path.basename(imgFullPathOri))


            data['subjectId'] =0 #TODO debug

            fileName = "{}_{}".format(data['subjectId'],  os.path.basename(imgFullPathOri)[:-4])
            if args.bRenderToFiles and os.path.exists(os.path.join(render_dirName, fileName+".jpg")):
                continue

            if True:    #Additional path checking, if not valid
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri ,1)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri,2)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri, 3 )
                
            scale = data['scale'][0]
            center = data['center'][0]

            # print(data['annotId'])
            ours_betas = torch.from_numpy(data['pred_shape'])
            ours_pose_rotmat = torch.from_numpy(data['pred_pose_rotmat'])
            # spin_betas = torch.from_numpy(data['opt_beta'])
            
            #Compute 2D reprojection error
            # if not (data['loss_keypoints_2d']<0.0001 or data['loss_keypoints_2d']>0.001 :
            #     continue
            maxBeta = abs(torch.max( abs(ours_betas)).item())

            if data['loss_keypoints_2d']>0.0005 or maxBeta>3:
                erroneousCnt +=1
            
            print(">>> loss2d: {}, maxBeta: {}".format( data['loss_keypoints_2d'],maxBeta) )

            # spin_pose = torch.from_numpy(data['opt_pose'])
            pred_camera_vis = data['pred_camera']
    
            if os.path.exists(imgFullPath) == False:
                print(imgFullPath)
                assert os.path.exists(imgFullPath)
            rawImg = cv2.imread(imgFullPath)
            print(imgFullPath)

            croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, center, scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

            #Visualize 2D image
            if args.bRenderToFiles ==False:
                viewer2D.ImShow(rawImg, name='rawImg', waitTime=10)      #You should press any key 
                viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=10)

            ours_output = smpl(betas=ours_betas, body_pose=ours_pose_rotmat[:,1:], global_orient=ours_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
            ours_vertices = ours_output.vertices.detach().cpu().numpy() 
            ours_joints_3d = ours_output.joints.detach().cpu().numpy() 

            #Visualize 3D mesh and 3D skeleton in BBox Space
            if True:
                b =0
                camParam_scale = pred_camera_vis[b,0]
                camParam_trans = pred_camera_vis[b,1:]

                ############### Visualize Mesh ############### 
                pred_vert_vis = ours_vertices[b].copy()
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                glViewer.setMeshData([pred_meshes], bComputeNormal= True)

                ################ Visualize Skeletons ############### 
                #Vis pred-SMPL joint
                pred_joints_vis = ours_joints_3d[b,:,:3].copy()     #(N,3)
                pred_joints_vis = convert_smpl_to_bbox(pred_joints_vis, camParam_scale, camParam_trans)
                glViewer.setSkeleton( [pred_joints_vis.ravel()[:,np.newaxis]])

                ################ Other 3D setup############### 
                glViewer.setBackgroundTexture(croppedImg)
                glViewer.setWindowSize(croppedImg.shape[1]*args.windowscale, croppedImg.shape[0]*args.windowscale)
                glViewer.SetOrthoCamera(True)

                print("Press 'q' in the 3D window to go to the next sample")
                glViewer.show(0)
            
            #Visualize 3D mesh and 3D skeleton on original image space
            if True:
                b =0
                camParam_scale = pred_camera_vis[b,0]
                camParam_trans = pred_camera_vis[b,1:]

                ############### Visualize Mesh ############### 
                pred_vert_vis = ours_vertices[b].copy()
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 
                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                glViewer.setMeshData([pred_meshes], bComputeNormal= True)

                # ################ Visualize Skeletons ############### 
                #Vis pred-SMPL joint
                pred_joints_vis = ours_joints_3d[b,:,:3].copy()     #(N,3)
                pred_joints_vis = convert_smpl_to_bbox(pred_joints_vis, camParam_scale, camParam_trans)
                pred_joints_vis = convert_bbox_to_oriIm(pred_joints_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 


                glViewer.setSkeleton( [pred_joints_vis.ravel()[:,np.newaxis]])

                glViewer.setBackgroundTexture(rawImg)
                glViewer.setWindowSize(rawImg.shape[1]*args.magnifyFactor, rawImg.shape[0]*args.magnifyFactor)
                glViewer.SetOrthoCamera(True)

                print("Press 'q' in the 3D window to go to the next sample")

                if args.bRenderToFiles:        #Export rendered files
                    if os.path.exists(render_dirName) == False:     #make an output folder if necessary
                        os.mkdir(render_dirName)

                    # subjId = data['subjectId'][22:24]
                    fileName = "{}_{}".format(data['subjectId'],  os.path.basename(imgFullPathOri)[:-4])

                    # rawImg = cv2.putText(rawImg,data['subjectId'],(100,100), cv2.FONT_HERSHEY_PLAIN, 2, (255,255,0),2)
                    glViewer.render_on_image(render_dirName, fileName, rawImg)
                    print(f"Render to {fileName}")
Code example #24
0
File: visEFTFit_coco.py  Project: liuguoyou/eft
def visEFT_singleSubject(renderer):

    bStopForEachSample = args.waitforkeys  #if True, it will wait for any key pressed to move to the next sample

    inputData = args.fit_data
    imgDir = args.img_dir

    #Load SMPL model
    smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
    smpl = SMPL(smplModelPath, batch_size=1, create_transl=False)

    print("Loading coco annotation from:{}".format(args.cocoAnnotFile))
    assert os.path.exists(args.cocoAnnotFile)
    cocoAnnotDic = loadCOCOAnnot(args.cocoAnnotFile)

    #Load EFT fitting data
    print(f"Loading EFT data from {inputData}")
    if os.path.exists(inputData):
        with open(inputData, 'r') as f:
            eft_data = json.load(f)
            print("EFT data: ver {}".format(eft_data['ver']))
            eft_data_all = eft_data['data']
    else:
        print(f"ERROR:: Cannot find EFT data: {inputData}")
        assert False

    #Visualize each EFT Fitting output
    for idx, eft_data in enumerate(eft_data_all):

        #Get raw image path
        imgFullPath = eft_data['imageName']
        imgName = os.path.basename(imgFullPath)
        imgFullPath = os.path.join(imgDir, imgName)
        if os.path.exists(imgFullPath) == False:
            print(f"Img path is not valid: {imgFullPath}")
            assert False
        rawImg = cv2.imread(imgFullPath)
        print(f'Input image: {imgFullPath}')

        #EFT data
        bbox_scale = eft_data['bbox_scale']
        bbox_center = eft_data['bbox_center']

        pred_camera = np.array(eft_data['parm_cam'])
        pred_betas = np.reshape(
            np.array(eft_data['parm_shape'], dtype=np.float32),
            (1, 10))  #(10,)
        pred_betas = torch.from_numpy(pred_betas)

        pred_pose_rotmat = np.reshape(
            np.array(eft_data['parm_pose'], dtype=np.float32),
            (1, 24, 3, 3))  #(24,3,3)
        pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)

        keypoint_2d_validity = eft_data['joint_validity_openpose18']

        #COCO only. Annotation index
        print("COCO annotId: {}".format(eft_data['annotId']))
        annot = cocoAnnotDic[eft_data['annotId']]
        print(annot['bbox'])

        ########################
        #Visualize COCO annotation
        annot_keypoint = np.reshape(
            np.array(annot['keypoints'], dtype=np.float32), (-1, 3))  #17,3
        rawImg = viewer2D.Vis_Skeleton_2D_coco(annot_keypoint[:, :2],
                                               annot_keypoint[:, 2],
                                               image=rawImg)
        rawImg = viewer2D.Vis_Bbox(rawImg, annot['bbox'], color=(0, 255, 0))

        #Get SMPL mesh and joints from SMPL parameters
        smpl_output = smpl(betas=pred_betas,
                           body_pose=pred_pose_rotmat[:, 1:],
                           global_orient=pred_pose_rotmat[:, [0]],
                           pose2rot=False)
        smpl_vertices = smpl_output.vertices.detach().cpu().numpy()[0]
        smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()[0]

        #Crop image using cropping information
        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(
            rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES))

        ########################
        # Visualization of EFT
        ########################

        # Visualize 2D image
        if True:
            viewer2D.ImShow(rawImg, name='rawImg',
                            waitTime=1)  #You should press any key
            viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

            #Convert bbox_center, bbox_scale --> bbox_xyxy
            bbox_xyxy = conv_bboxinfo_bboxXYXY(bbox_scale, bbox_center)
            img_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg.copy(), bbox_xyxy[:2],
                                                  bbox_xyxy[2:])
            viewer2D.ImShow(img_bbox, name='img_bbox', waitTime=1)

        # Visualization Mesh
        if True:
            camParam_scale = pred_camera[0]
            camParam_trans = pred_camera[1:]
            pred_vert_vis = smpl_vertices
            smpl_joints_3d_vis = smpl_joints_3d

            if True:  #args.onbbox:
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis,
                                                     camParam_scale,
                                                     camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(
                    smpl_joints_3d_vis, camParam_scale, camParam_trans)
                renderer.setBackgroundTexture(croppedImg)
                renderer.setViewportSize(croppedImg.shape[1],
                                         croppedImg.shape[0])

            pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
            v = pred_meshes['ver']
            f = pred_meshes['f']

            #Visualize in the original image space
            renderer.set_mesh(v, f)
            renderer.showBackground(True)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("cam")

            renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            renderer.display()
            renderImg = renderer.get_screen_color_ibgr()
            viewer2D.ImShow(renderImg, waitTime=1)

        # Visualization Mesh on side view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("side")

            renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()  #Overwite on rawImg
            viewer2D.ImShow(sideImg, waitTime=1)

            sideImg = cv2.resize(sideImg,
                                 (renderImg.shape[1], renderImg.shape[0]))

        #Visualize camera view and side view
        saveImg = np.concatenate((renderImg, sideImg), axis=1)

        if bStopForEachSample:
            viewer2D.ImShow(
                saveImg, waitTime=0
            )  #waitTime=0 means that it will wait for any key pressed
        else:
            viewer2D.ImShow(saveImg, waitTime=1)

        #Save the rendered image to files
        if False:
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{:08d}.jpg'.format(
                idx)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, saveImg)
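
conv_bboxinfo_bboxXYXY used above is also not defined in this snippet. A hedged sketch under the 200-pixel scale convention seen in the other examples (box side roughly scale * 200, centered at bbox_center); the real helper may differ, e.g. in how the 0.9 padding factor is handled:

import numpy as np

def conv_bboxinfo_bboxXYXY_sketch(scale, center):
    half = scale * 200.0 / 2.0
    cx, cy = float(center[0]), float(center[1])
    return np.array([cx - half, cy - half, cx + half, cy + half])   # [minX, minY, maxX, maxY]
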
Code example #25
0
def LoadAllH36mdata_wSMPL_perSeq(out_path):
    # data_dir = '/home/hjoo/data/h36m-fetch/human36m_50fps/'
    # data_dir = '/home/hjoo/data/h36m-fetch/human36m_10fps/'

    list_skel2Ds_h36m = []
    list_skel3Ds_h36m = []
    list_smplPose = []
    list_smplShape = []
    list_openpose = []

    list_imgNames = []
    list_scale = []
    list_center = []

    # list_joint2d_spin24 = []
    # list_joint3d_spin24 = []

    TRAIN_SUBJECTS = [1, 5, 6, 7, 8]
    actionList = [
        "Directions", "Discussion", "Eating", "Greeting", "Phoning", "Photo",
        "Posing", "Purchases", "Sitting", "SittingDown", "Smoking", "Waiting",
        "WalkDog", "Walking", "WalkTogether"
    ]

    subjectList = TRAIN_SUBJECTS

    for subId in subjectList:
        for action in actionList:

            gtPathList = sorted(
                glob.glob('{}/S{}/{}_*/*/gt_poses_coco_smpl.pkl'.format(
                    h36mraw_dir, subId, action)))

            print("S{} - {}: {} files".format(subId, action, len(gtPathList)))

            for gtPath in gtPathList:
                with open(gtPath, 'rb') as f:
                    gt_data = pickle.load(f, encoding='latin1')

                #Get Image List
                imgDir = os.path.dirname(gtPath)
                imgList_original = sorted(
                    glob.glob(os.path.join(imgDir, '*.png')))
                folderLeg = len(h36mraw_dir) + 1
                imgList = [n[folderLeg:] for n in imgList_original]
                data2D_h36m = np.array(gt_data['2d'])  #List -> (N,17,2)
                data3D_h36m = np.array(gt_data['3d'])  #List -> (N,17,3)
                data3D_smplParams_pose = np.array(
                    gt_data['smplParms']['poses_camCoord'])  #List -> (N,72)
                data3D_smplParams_shape = np.array(
                    gt_data['smplParms']['betas'])  #(10,)

                N = data3D_smplParams_pose.shape[0]
                data3D_smplParams_shape = np.repeat(
                    data3D_smplParams_shape[np.newaxis, :], N,
                    axis=0)  #List -> (N,10)

                #Scaling skeleton 3D (currently mm) -> meter
                data3D_h36m *= 0.001
                #optional (centering)
                data3D_h36m = data3D_h36m - data3D_h36m[:, 0:1, :]

                scalelist = []
                centerlist = []
                bboxlist = []
                #Generate BBox
                for i in range(len(data2D_h36m)):
                    min_pt = np.min(data2D_h36m[i], axis=0)
                    max_pt = np.max(data2D_h36m[i], axis=0)
                    bbox = [min_pt[0], min_pt[1], max_pt[0], max_pt[1]]
                    center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]
                    scale = scaleFactor * max(bbox[2] - bbox[0],
                                              bbox[3] - bbox[1]) / 200

                    bboxlist.append(bbox)
                    centerlist.append(center)
                    scalelist.append(scale)

                    # Debug-only block: it references prediction variables (pred_camera_vis,
                    # ours_vertices, smpl, multilvel_bbox_crop_gen) that are not defined in this
                    # function, so it is disabled here.
                    bDraw = False
                    if bDraw:
                        rawImg = cv2.imread(imgList_original[i])

                        # bbox_xyxy = conv_bboxinfo_centerscale_to_bboxXYXY(center, scale)
                        # rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg,bbox_xyxy[:2], bbox_xyxy[2:])
                        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(
                            rawImg, center, scale,
                            (constants.IMG_RES, constants.IMG_RES))

                        #Visualize image
                        if False:
                            rawImg = viewer2D.Vis_Skeleton_2D_SPIN49(
                                data['keypoint2d'][0][:, :2],
                                pt2d_visibility=data['keypoint2d'][0][:, 2],
                                image=rawImg)
                            viewer2D.ImShow(rawImg, name='rawImg')
                            viewer2D.ImShow(croppedImg, name='croppedImg')

                        b = 0
                        ############### Visualize Mesh ###############
                        camParam_scale = pred_camera_vis[b, 0]
                        camParam_trans = pred_camera_vis[b, 1:]
                        pred_vert_vis = ours_vertices[b].copy()
                        pred_vert_vis = convert_smpl_to_bbox(
                            pred_vert_vis, camParam_scale, camParam_trans)

                        #From cropped space to original
                        pred_vert_vis = convert_bbox_to_oriIm(
                            pred_vert_vis, boxScale_o2n, bboxTopLeft,
                            rawImg.shape[1], rawImg.shape[0])

                        #Generate multi-level BBOx
                        bbox_list = multilvel_bbox_crop_gen(rawImg,
                                                            pred_vert_vis,
                                                            center,
                                                            scale,
                                                            bDebug=False)

                        if False:
                            pred_meshes = {
                                'ver': pred_vert_vis,
                                'f': smpl.faces
                            }
                            glViewer.setMeshData([pred_meshes],
                                                 bComputeNormal=True)

                            # ################ Visualize Skeletons ###############
                            #Vis pred-SMPL joint
                            pred_joints_vis = ours_joints_3d[
                                b, :, :3].copy()  #(N,3)
                            pred_joints_vis = convert_smpl_to_bbox(
                                pred_joints_vis, camParam_scale,
                                camParam_trans)
                            pred_joints_vis = convert_bbox_to_oriIm(
                                pred_joints_vis, boxScale_o2n, bboxTopLeft,
                                rawImg.shape[1], rawImg.shape[0])

                            glViewer.setBackgroundTexture(rawImg)
                            glViewer.setWindowSize(rawImg.shape[1],
                                                   rawImg.shape[0])
                            glViewer.SetOrthoCamera(True)
                            glViewer.show(1)

                assert len(imgList) == len(data2D_h36m)
                assert len(imgList) == len(data3D_h36m)
                assert len(imgList) == len(data3D_smplParams_pose)
                assert len(imgList) == len(data3D_smplParams_shape)
                assert len(imgList) == len(scalelist)
                assert len(imgList) == len(centerlist)
                assert len(imgList) == len(bboxlist)

                list_skel2Ds_h36m.append(data2D_h36m)
                list_skel3Ds_h36m.append(data3D_h36m)
                list_smplPose.append(data3D_smplParams_pose)
                list_smplShape.append(data3D_smplParams_shape)

                list_imgNames += imgList
                list_scale += scalelist
                list_center += centerlist

                blankopenpose = np.zeros([N, 25, 3])
                list_openpose.append(blankopenpose)

                #Debug 2D Visualize
                if True:
                    for idx in range(data2D_h36m.shape[0]):
                        img = cv2.imread(imgList_original[idx])
                        img = viewer2D.Vis_Skeleton_2D_H36m(data2D_h36m[idx],
                                                            image=img)
                        img = viewer2D.Vis_Bbox_minmaxPt(
                            img, bboxlist[idx][:2], bboxlist[idx][2:])
                        viewer2D.ImShow(img)

                #Debug 3D Visualize smpl_coco
                if False:
                    # data3D_coco_vis = np.reshape(data3D_coco, (data3D_coco.shape[0],-1)).transpose()   #(Dim, F)
                    # data3D_coco_vis *=0.1   #mm to cm
                    # glViewer.setSkeleton( [ data3D_coco_vis] ,jointType='smplcoco')
                    # glViewer.show()

                    #Debug 3D Visualize, h36m
                    data3D_h36m_vis = np.reshape(
                        data3D_h36m,
                        (data3D_h36m.shape[0], -1)).transpose()  #(Dim, F)
                    data3D_h36m_vis *= 100  #meter to cm

                    # data3D_smpl24 = np.reshape(data3D_smpl24, (data3D_smpl24.shape[0],-1)).transpose()   #(Dim, F)
                    # data3D_smpl24 *=0.1

                    glViewer.setSkeleton([data3D_h36m_vis],
                                         jointType='smplcoco')
                    glViewer.show()

            #     break   #debug
            # break       #debug

    list_skel2Ds_h36m = np.vstack(
        list_skel2Ds_h36m)  #List of (N,17,2) ->  (NM, 17, 2)
    list_skel3Ds_h36m = np.vstack(
        list_skel3Ds_h36m)  #List of (N,17,3) ->  (NM, 17, 3)
    list_smplPose = np.vstack(list_smplPose)  #List of (N,72) ->  (NM, 72)
    list_smplShape = np.vstack(list_smplShape)  #List of (N,10) ->  (NM, 10)
    list_openpose = np.vstack(list_openpose)  #List of (N,25,3) ->  (NM, 25, 3)

    assert len(list_imgNames) == list_skel2Ds_h36m.shape[0]
    assert len(list_imgNames) == list_skel3Ds_h36m.shape[0]
    assert len(list_imgNames) == list_smplPose.shape[0]
    assert len(list_imgNames) == list_smplShape.shape[0]
    assert len(list_imgNames) == list_openpose.shape[0]

    assert len(list_imgNames) == len(list_scale)
    assert len(list_imgNames) == len(list_center)

    #Convert H36M -> SPIN24
    # convert joints to global order
    # h36m_idx = [11, 6, 7, 8, 1, 2, 3, 12, 24, 14, 15, 17, 18, 19, 25, 26, 27]
    h36m_idx = [0, 4, 5, 6, 1, 2, 3, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
    global_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]

    sampleNum = len(list_imgNames)
    joint2d_spin24 = np.zeros((sampleNum, 24, 3))
    joint2d_spin24[:, global_idx, :2] = list_skel2Ds_h36m[:, h36m_idx, :]
    joint2d_spin24[:, global_idx, 2] = 1

    joint3d_spin24 = np.zeros((sampleNum, 24, 4))
    joint3d_spin24[:, global_idx, :3] = list_skel3Ds_h36m[:, h36m_idx, :]
    joint3d_spin24[:, global_idx, 3] = 1

    list_has_smpl = np.ones((sampleNum, ), dtype=np.uint8)

    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)

    out_file = os.path.join(out_path, 'h36m_training_fair_meter.npz')
    print("output: {}".format(out_file))
    np.savez(out_file,
             imgname=list_imgNames,
             center=list_center,
             scale=list_scale,
             part=joint2d_spin24,
             pose=list_smplPose,
             shape=list_smplShape,
             has_smpl=list_has_smpl,
             S=joint3d_spin24,
             openpose=list_openpose)
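
The H36M-17 to SPIN-24 remapping at the end of the function can be read as a small standalone helper: the 17 H36M joints (already subsampled from the raw 32-joint skeleton) are scattered into a 24-slot array whose last column is a validity flag. A sketch using the same index tables as above:

import numpy as np

H36M_IDX   = [0, 4, 5, 6, 1, 2, 3, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
GLOBAL_IDX = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]

def h36m17_to_spin24(joints17):                 # joints17: (17, 3), meters, root-relative
    out = np.zeros((24, 4))
    out[GLOBAL_IDX, :3] = joints17[H36M_IDX]
    out[GLOBAL_IDX, 3] = 1                      # mark the filled joints as valid
    return out
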
Code example #26
0
    def regress(self, img_original, bbox_XYWH, bExport=True):
        """
            args: 
                img_original: original raw image (BGR order by using cv2.imread)
                bbox_XYWH: bounding box around the target: (minX,minY,width, height)
            outputs:
                Default output:
                    pred_vertices_img:
                    pred_joints_vis_img:
                if bExport==True
                    pred_rotmat
                    pred_betas
                    pred_camera
                    bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
                    bboxTopLeft:  bbox top left (redundant)
                    boxScale_o2n: bbox scaling factor (redundant) 
        """
        img, norm_img, boxScale_o2n, bboxTopLeft, bbox = process_image_bbox(
            img_original, bbox_XYWH, input_res=constants.IMG_RES)
        if img is None:
            return None

        with torch.no_grad():
            pred_rotmat, pred_betas, pred_camera = self.model_regressor(
                norm_img.to(self.device))
            pred_output = self.smpl(betas=pred_betas,
                                    body_pose=pred_rotmat[:, 1:],
                                    global_orient=pred_rotmat[:,
                                                              0].unsqueeze(1),
                                    pose2rot=False)
            pred_vertices = pred_output.vertices
            pred_joints_3d = pred_output.joints

            # img_original = img
            if False:
                #show cropped image
                # curImgVis = img
                curImgVis = self.de_normalize_img(img).cpu().numpy()
                curImgVis = np.transpose(curImgVis, (1, 2, 0)) * 255.0
                curImgVis = curImgVis[:, :, [2, 1, 0]]
                curImgVis = np.ascontiguousarray(curImgVis, dtype=np.uint8)

                viewer2D.ImShow(curImgVis, name='input_{}'.format(idx))

            pred_vertices = pred_vertices[0].cpu().numpy()
            img = img[:, :, [2, 1, 0]]
            img = np.ascontiguousarray(img * 255, dtype=np.uint8)

            pred_camera = pred_camera.cpu().numpy().ravel()
            camScale = pred_camera[0]  # *1.15
            camTrans = pred_camera[1:]

            #Convert mesh to original image space (X,Y are aligned to image)
            pred_vertices_bbox = convert_smpl_to_bbox(
                pred_vertices, camScale, camTrans)  #SMPL -> 2D bbox
            pred_vertices_img = convert_bbox_to_oriIm(
                pred_vertices_bbox, boxScale_o2n, bboxTopLeft,
                img_original.shape[1],
                img_original.shape[0])  #2D bbox -> original 2D image

            #Convert joint to original image space (X,Y are aligned to image)
            pred_joints_3d = pred_joints_3d[0].cpu().numpy()  #(49,3)
            pred_joints_vis = pred_joints_3d[:, :3]  #(49,3)
            pred_joints_vis_bbox = convert_smpl_to_bbox(
                pred_joints_vis, camScale, camTrans)  #SMPL -> 2D bbox
            pred_joints_vis_img = convert_bbox_to_oriIm(
                pred_joints_vis_bbox, boxScale_o2n, bboxTopLeft,
                img_original.shape[1],
                img_original.shape[0])  #2D bbox -> original 2D image

            ##Output
            predoutput = {}
            predoutput[
                'pred_vertices_img'] = pred_vertices_img  #SMPL vertex in image space
            predoutput[
                'pred_joints_img'] = pred_joints_vis_img  #SMPL joints in image space
            if bExport:
                predoutput['pred_rotmat'] = pred_rotmat.detach().cpu().numpy()
                predoutput['pred_betas'] = pred_betas.detach().cpu().numpy()
                predoutput['pred_camera'] = pred_camera
                predoutput['bbox_xyxy'] = [
                    bbox_XYWH[0], bbox_XYWH[1], bbox_XYWH[0] + bbox_XYWH[2],
                    bbox_XYWH[1] + bbox_XYWH[3]
                ]
                predoutput['bboxTopLeft'] = bboxTopLeft
                predoutput['boxScale_o2n'] = boxScale_o2n

        return predoutput
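
A hedged usage sketch of regress(): the bounding box is given as (minX, minY, width, height) and the returned dictionary uses the keys listed in the docstring above. The body_mocap instance name, image path and bbox values are made up for illustration:

import cv2

img = cv2.imread('input.jpg')                                     # BGR, as expected by regress()
predoutput = body_mocap.regress(img, bbox_XYWH=[100, 50, 200, 400], bExport=True)
if predoutput is not None:
    verts_img = predoutput['pred_vertices_img']    # SMPL vertices aligned to image X/Y
    joints_img = predoutput['pred_joints_img']     # 3D joints aligned to image X/Y
    print(predoutput['bbox_xyxy'])                 # [minX, minY, maxX, maxY]
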
Code example #27
0
File: ddp_trainer.py  Project: facebookresearch/eft
    def train_step(self, input_batch):
        self.model.train()

        # Get data from the batch
        images = input_batch['img']  # input image
        gt_keypoints_2d = input_batch[
            'keypoints']  # 2D keypoints           #[N,49,3]
        gt_pose = input_batch[
            'pose']  # SMPL pose parameters                #[N,72]
        gt_betas = input_batch[
            'betas']  # SMPL beta parameters              #[N,10]
        gt_joints = input_batch[
            'pose_3d']  # 3D pose                        #[N,24,4]
        has_smpl = input_batch['has_smpl'].byte(
        ) == 1  # flag that indicates whether SMPL parameters are valid
        has_pose_3d = input_batch['has_pose_3d'].byte(
        ) == 1  # flag that indicates whether 3D pose is valid
        is_flipped = input_batch[
            'is_flipped']  # flag that indicates whether image was flipped during data augmentation
        rot_angle = input_batch[
            'rot_angle']  # rotation angle used for data augmentation
        dataset_name = input_batch[
            'dataset_name']  # name of the dataset the image comes from
        indices = input_batch[
            'sample_index']  # index of example inside its dataset
        batch_size = images.shape[0]

        #Debug temporary scaling for h36m
        # Get GT vertices and model joints
        # Note that gt_model_joints is different from gt_joints as it comes from SMPL
        gt_out = self.smpl(betas=gt_betas,
                           body_pose=gt_pose[:, 3:],
                           global_orient=gt_pose[:, :3])

        gt_model_joints = gt_out.joints.detach()  #[N, 49, 3]
        gt_vertices = gt_out.vertices

        # else:
        #     gt_out = self.smpl(betas=gt_betas, body_pose=gt_pose[:,3:-6], global_orient=gt_pose[:,:3])

        #     gt_model_joints = gt_out.joints.detach()             #[N, 49, 3]
        #     gt_vertices = gt_out.vertices

        # Get current best fits from the dictionary

        opt_pose, opt_betas, opt_validity = self.fits_dict[(dataset_name,
                                                            indices.cpu(),
                                                            rot_angle.cpu(),
                                                            is_flipped.cpu())]
        opt_pose = opt_pose.to(self.device)
        opt_betas = opt_betas.to(self.device)
        # if g_smplx == False:
        opt_output = self.smpl(betas=opt_betas,
                               body_pose=opt_pose[:, 3:],
                               global_orient=opt_pose[:, :3])

        opt_vertices = opt_output.vertices
        opt_joints = opt_output.joints.detach()

        # else:
        #     opt_output = self.smpl(betas=opt_betas, body_pose=opt_pose[:,3:-6], global_orient=opt_pose[:,:3])

        #     opt_vertices = opt_output.vertices
        #     opt_joints = opt_output.joints.detach()

        #assure that non-valid opt entries fall back to GT values
        if len(has_smpl[opt_validity == 0]) > 0:
            assert min(has_smpl[opt_validity == 0])  #All should be True

        # De-normalize 2D keypoints from [-1,1] to pixel space
        gt_keypoints_2d_orig = gt_keypoints_2d.clone()
        gt_keypoints_2d_orig[:, :, :-1] = 0.5 * self.options.img_res * (
            gt_keypoints_2d_orig[:, :, :-1] + 1)

        # Estimate camera translation given the model joints and 2D keypoints
        # by minimizing a weighted least squares loss
        gt_cam_t = estimate_translation(gt_model_joints,
                                        gt_keypoints_2d_orig,
                                        focal_length=self.focal_length,
                                        img_size=self.options.img_res)

        opt_cam_t = estimate_translation(opt_joints,
                                         gt_keypoints_2d_orig,
                                         focal_length=self.focal_length,
                                         img_size=self.options.img_res)

        opt_joint_loss = self.smplify.get_fitting_loss(
            opt_pose,
            opt_betas,
            opt_cam_t,  #opt_pose (N,72)  (N,10)  opt_cam_t: (N,3)
            0.5 * self.options.img_res *
            torch.ones(batch_size, 2, device=self.device),  #(N,2)   (112, 112)
            gt_keypoints_2d_orig).mean(dim=-1)

        # Feed images in the network to predict camera and SMPL parameters
        pred_rotmat, pred_betas, pred_camera = self.model(images)

        # if g_smplx == False: #Original
        pred_output = self.smpl(betas=pred_betas,
                                body_pose=pred_rotmat[:, 1:],
                                global_orient=pred_rotmat[:, 0].unsqueeze(1),
                                pose2rot=False)
        # else:
        #     pred_output = self.smpl(betas=pred_betas, body_pose=pred_rotmat[:,1:-2], global_orient=pred_rotmat[:,0].unsqueeze(1), pose2rot=False)

        pred_vertices = pred_output.vertices
        pred_joints = pred_output.joints

        # Convert Weak Perspective Camera [s, tx, ty] to camera translation [tx, ty, tz] in 3D given the bounding box size
        # This camera translation can be used in a full perspective projection
        pred_cam_t = torch.stack([
            pred_camera[:, 1], pred_camera[:, 2], 2 * self.focal_length /
            (self.options.img_res * pred_camera[:, 0] + 1e-9)
        ],
                                 dim=-1)
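        # e.g. with focal length f and crop resolution R, a weak-perspective scale s maps to a
        # depth of tz = 2*f / (R*s): the larger s is (the person fills the crop), the closer the camera.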

        camera_center = torch.zeros(batch_size, 2, device=self.device)
        pred_keypoints_2d = perspective_projection(
            pred_joints,
            rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(
                batch_size, -1, -1),
            translation=pred_cam_t,
            focal_length=self.focal_length,
            camera_center=camera_center)
        # Normalize keypoints to [-1,1]
        pred_keypoints_2d = pred_keypoints_2d / (self.options.img_res / 2.)

        #Weak Projection
        if self.options.bUseWeakProj:
            pred_keypoints_2d = weakProjection_gpu(pred_joints, pred_camera[:,
                                                                            0],
                                                   pred_camera[:,
                                                               1:])  #N, 49, 2

        bFootOriLoss = False
        if bFootOriLoss:  #Ignore hips and hip centers, foot
            # LENGTH_THRESHOLD = 0.0089 #1/112.0     #at least it should be 5 pixel
            #Disable parts
            gt_keypoints_2d[:, 2 + 25, 2] = 0
            gt_keypoints_2d[:, 3 + 25, 2] = 0
            gt_keypoints_2d[:, 14 + 25, 2] = 0

            #Disable Foots
            gt_keypoints_2d[:, 5 + 25, 2] = 0  #Left foot
            gt_keypoints_2d[:, 0 + 25, 2] = 0  #Right foot

        if self.options.run_smplify:

            # Convert predicted rotation matrices to axis-angle
            pred_rotmat_hom = torch.cat([
                pred_rotmat.detach().view(-1, 3, 3).detach(),
                torch.tensor(
                    [0, 0, 1], dtype=torch.float32, device=self.device).view(
                        1, 3, 1).expand(batch_size * 24, -1, -1)
            ],
                                        dim=-1)
            pred_pose = rotation_matrix_to_angle_axis(
                pred_rotmat_hom).contiguous().view(batch_size, -1)
            # tgm.rotation_matrix_to_angle_axis returns NaN for 0 rotation, so manually hack it
            pred_pose[torch.isnan(pred_pose)] = 0.0

            # Run SMPLify optimization starting from the network prediction
            new_opt_vertices, new_opt_joints,\
            new_opt_pose, new_opt_betas,\
            new_opt_cam_t, new_opt_joint_loss = self.smplify(
                                        pred_pose.detach(), pred_betas.detach(),
                                        pred_cam_t.detach(),
                                        0.5 * self.options.img_res * torch.ones(batch_size, 2, device=self.device),
                                        gt_keypoints_2d_orig)
            new_opt_joint_loss = new_opt_joint_loss.mean(dim=-1)

            # Will update the dictionary for the examples where the new loss is less than the current one
            update = (new_opt_joint_loss < opt_joint_loss)
            # print("new_opt_joint_loss{} vs opt_joint_loss{}".format(new_opt_joint_loss))

            if True:  #Visualize opt
                for b in range(batch_size):

                    curImgVis = images[b]  #3,224,224
                    curImgVis = self.de_normalize_img(curImgVis).cpu().numpy()
                    curImgVis = np.transpose(curImgVis, (1, 2, 0)) * 255.0
                    curImgVis = curImgVis[:, :, [2, 1, 0]]

                    #Denormalize image
                    curImgVis = np.ascontiguousarray(curImgVis, dtype=np.uint8)
                    viewer2D.ImShow(curImgVis, name='rawIm')
                    originalImg = curImgVis.copy()

                    pred_camera_vis = pred_camera.detach().cpu().numpy()

                    opt_vert_vis = opt_vertices[b].detach().cpu().numpy()
                    opt_vert_vis *= pred_camera_vis[b, 0]
                    opt_vert_vis[:, 0] += pred_camera_vis[b, 1]  #no need +1 (or 112). Rendering has this offset already
                    opt_vert_vis[:, 1] += pred_camera_vis[b, 2]  #no need +1 (or 112). Rendering has this offset already
                    opt_vert_vis *= 112
                    opt_meshes = {'ver': opt_vert_vis, 'f': self.smpl.faces}

                    gt_vert_vis = gt_vertices[b].detach().cpu().numpy()
                    gt_vert_vis *= pred_camera_vis[b, 0]
                    gt_vert_vis[:, 0] += pred_camera_vis[b, 1]  #no need +1 (or 112). Rendering has this offset already
                    gt_vert_vis[:, 1] += pred_camera_vis[b, 2]  #no need +1 (or 112). Rendering has this offset already
                    gt_vert_vis *= 112
                    gt_meshes = {'ver': gt_vert_vis, 'f': self.smpl.faces}

                    new_opt_output = self.smpl(
                        betas=new_opt_betas,
                        body_pose=new_opt_pose[:, 3:],
                        global_orient=new_opt_pose[:, :3])
                    new_opt_vertices = new_opt_output.vertices
                    new_opt_joints = new_opt_output.joints
                    new_opt_vert_vis = new_opt_vertices[b].detach().cpu().numpy()
                    new_opt_vert_vis *= pred_camera_vis[b, 0]
                    new_opt_vert_vis[:, 0] += pred_camera_vis[b, 1]  #no need +1 (or 112). Rendering has this offset already
                    new_opt_vert_vis[:, 1] += pred_camera_vis[b, 2]  #no need +1 (or 112). Rendering has this offset already
                    new_opt_vert_vis *= 112
                    new_opt_meshes = {'ver': new_opt_vert_vis, 'f': self.smpl.faces}

                    glViewer.setMeshData(
                        [opt_meshes, gt_meshes, new_opt_meshes],  #current fit, GT, and newly optimized fit
                        bComputeNormal=True)

                    glViewer.setBackgroundTexture(originalImg)
                    glViewer.setWindowSize(curImgVis.shape[1],
                                           curImgVis.shape[0])
                    glViewer.SetOrthoCamera(True)

                    print(has_smpl[b])
                    glViewer.show()

            opt_joint_loss[update] = new_opt_joint_loss[update]
            opt_vertices[update, :] = new_opt_vertices[update, :]
            opt_joints[update, :] = new_opt_joints[update, :]
            opt_pose[update, :] = new_opt_pose[update, :]
            opt_betas[update, :] = new_opt_betas[update, :]
            opt_cam_t[update, :] = new_opt_cam_t[update, :]

            self.fits_dict[(dataset_name, indices.cpu(), rot_angle.cpu(),
                            is_flipped.cpu(),
                            update.cpu())] = (opt_pose.cpu(), opt_betas.cpu())

        else:
            update = torch.zeros(batch_size, device=self.device).byte()

        # Replace the optimized parameters with the ground truth parameters, if available
        opt_vertices[has_smpl, :, :] = gt_vertices[has_smpl, :, :]
        opt_cam_t[has_smpl, :] = gt_cam_t[has_smpl, :]
        opt_joints[has_smpl, :, :] = gt_model_joints[has_smpl, :, :]
        opt_pose[has_smpl, :] = gt_pose[has_smpl, :]
        opt_betas[has_smpl, :] = gt_betas[has_smpl, :]

        # Assess whether a fit is valid by comparing the joint loss with the threshold
        valid_fit = (opt_joint_loss < self.options.smplify_threshold).to(
            self.device)

        if self.options.ablation_no_pseudoGT:
            valid_fit[:] = False  #Disable all pseudoGT

        # Add the examples with GT parameters to the list of valid fits
        valid_fit = valid_fit | has_smpl

        # if len(valid_fit) > sum(valid_fit):
        #     print(">> Rejected fit: {}/{}".format(len(valid_fit) - sum(valid_fit), len(valid_fit) ))

        opt_keypoints_2d = perspective_projection(
            opt_joints,
            rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(
                batch_size, -1, -1),
            translation=opt_cam_t,
            focal_length=self.focal_length,
            camera_center=camera_center)

        opt_keypoints_2d = opt_keypoints_2d / (self.options.img_res / 2.)

        # Compute loss on SMPL parameters
        loss_regr_pose, loss_regr_betas = self.smpl_losses(
            pred_rotmat, pred_betas, opt_pose, opt_betas, valid_fit)

        # Compute 2D reprojection loss for the keypoints
        loss_keypoints = self.keypoint_loss(pred_keypoints_2d, gt_keypoints_2d,
                                            self.options.openpose_train_weight,
                                            self.options.gt_train_weight)

        # Compute 3D keypoint loss
        loss_keypoints_3d = self.keypoint_3d_loss(pred_joints, gt_joints,
                                                  has_pose_3d)

        # Per-vertex loss for the shape
        loss_shape = self.shape_loss(pred_vertices, opt_vertices, valid_fit)

        #Regularization term for shape
        loss_regr_betas_noReject = torch.mean(pred_betas**2)

        # Compute total loss
        # The last component is a loss that forces the network to predict positive depth values
        if self.options.ablation_loss_2dkeyonly:  #2D keypoint only
            loss = self.options.keypoint_loss_weight * loss_keypoints +\
                ((torch.exp(-pred_camera[:, 0] * 10)) ** 2).mean() +\
                self.options.beta_loss_weight * loss_regr_betas_noReject  #Beta regularization

        elif self.options.ablation_loss_noSMPLloss:  #Keypoint losses only, no SMPL parameter supervision
            loss = self.options.keypoint_loss_weight * loss_keypoints +\
                self.options.keypoint_loss_weight * loss_keypoints_3d +\
                ((torch.exp(-pred_camera[:, 0] * 10)) ** 2).mean() +\
                self.options.beta_loss_weight * loss_regr_betas_noReject  #Beta regularization

        else:
            loss = self.options.shape_loss_weight * loss_shape +\
                self.options.keypoint_loss_weight * loss_keypoints +\
                self.options.keypoint_loss_weight * loss_keypoints_3d +\
                loss_regr_pose + self.options.beta_loss_weight * loss_regr_betas +\
                ((torch.exp(-pred_camera[:, 0] * 10)) ** 2).mean()

        # loss = self.options.keypoint_loss_weight * loss_keypoints #Debug: 2d error only
        # print("DEBUG: 2donly loss")
        loss *= 60

        # Do backprop
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Pack output arguments for tensorboard logging
        output = {
            'pred_vertices': pred_vertices.detach(),
            'opt_vertices': opt_vertices,
            'pred_cam_t': pred_cam_t.detach(),
            'opt_cam_t': opt_cam_t
        }
        losses = {
            'loss': loss.detach().item(),
            'loss_keypoints': loss_keypoints.detach().item(),
            'loss_keypoints_3d': loss_keypoints_3d.detach().item(),
            'loss_regr_pose': loss_regr_pose.detach().item(),
            'loss_regr_betas': loss_regr_betas.detach().item(),
            'loss_shape': loss_shape.detach().item()
        }

        if self.options.bDebug_visEFT:  #Debug: visualize input
            for b in range(batch_size):
                #denormalizeImg
                curImgVis = images[b]  #3,224,224
                curImgVis = self.de_normalize_img(curImgVis).cpu().numpy()
                curImgVis = np.transpose(curImgVis, (1, 2, 0)) * 255.0
                curImgVis = curImgVis[:, :, [2, 1, 0]]

                #Denormalize image
                curImgVis = np.ascontiguousarray(curImgVis, dtype=np.uint8)
                viewer2D.ImShow(curImgVis, name='rawIm')
                originalImg = curImgVis.copy()

                # curImgVis = viewer2D.Vis_Skeleton_2D_general(gt_keypoints_2d_orig[b,:,:2].cpu().numpy(), gt_keypoints_2d_orig[b,:,2], bVis= False, image=curImgVis)

                pred_keypoints_2d_vis = pred_keypoints_2d[b, :, :2].detach().cpu().numpy()
                pred_keypoints_2d_vis = 0.5 * self.options.img_res * (pred_keypoints_2d_vis + 1)  #49 joints (25+24) x 2

                curImgVis = viewer2D.Vis_Skeleton_2D_general(
                    pred_keypoints_2d_vis, bVis=False, image=curImgVis)
                viewer2D.ImShow(curImgVis, scale=2.0, waitTime=1)

                #Get camera pred_params
                pred_camera_vis = pred_camera.detach().cpu().numpy()

                ############### Visualize Mesh ###############
                pred_vert_vis = pred_vertices[b].detach().cpu().numpy()
                # meshVertVis = gt_vertices[b].detach().cpu().numpy()
                # meshVertVis = meshVertVis-pelvis        #centering
                pred_vert_vis *= pred_camera_vis[b, 0]
                pred_vert_vis[:, 0] += pred_camera_vis[b, 1]  #no need +1 (or 112). Rendering has this offset already
                pred_vert_vis[:, 1] += pred_camera_vis[b, 2]  #no need +1 (or 112). Rendering has this offset already
                pred_vert_vis *= 112
                pred_meshes = {'ver': pred_vert_vis, 'f': self.smpl.faces}

                opt_vert_vis = opt_vertices[b].detach().cpu().numpy()
                opt_vert_vis *= pred_camera_vis[b, 0]
                opt_vert_vis[:, 0] += pred_camera_vis[b, 1]  #no need +1 (or 112). Rendering has this offset already
                opt_vert_vis[:, 1] += pred_camera_vis[b, 2]  #no need +1 (or 112). Rendering has this offset already
                opt_vert_vis *= 112
                opt_meshes = {'ver': opt_vert_vis, 'f': self.smpl.faces}

                glViewer.setMeshData([pred_meshes, opt_meshes],
                                     bComputeNormal=True)
                # glViewer.setMeshData([opt_meshes], bComputeNormal= True)

                ############### Visualize Skeletons ###############
                #Vis pred-SMPL joint
                pred_joints_vis = pred_joints[b, :, :3].detach().cpu().numpy()  #[49,3]
                pred_joints_vis = pred_joints_vis.ravel()[:, np.newaxis]
                #Weak-perspective projection
                pred_joints_vis *= pred_camera_vis[b, 0]
                pred_joints_vis[::3] += pred_camera_vis[b, 1]
                pred_joints_vis[1::3] += pred_camera_vis[b, 2]
                pred_joints_vis *= 112  #112 == 0.5*224
                glViewer.setSkeleton([pred_joints_vis])

                # #GT joint
                gt_jointsVis = gt_joints[b, :, :3].cpu().numpy()  #[49,3]
                # gt_pelvis = (gt_smpljointsVis[ 25+2,:] + gt_smpljointsVis[ 25+3,:]) / 2
                # gt_smpljointsVis = gt_smpljointsVis- gt_pelvis
                gt_jointsVis = gt_jointsVis.ravel()[:, np.newaxis]
                gt_jointsVis *= pred_camera_vis[b, 0]
                gt_jointsVis[::3] += pred_camera_vis[b, 1]
                gt_jointsVis[1::3] += pred_camera_vis[b, 2]
                gt_jointsVis *= 112
                glViewer.addSkeleton([gt_jointsVis], jointType='spin')

                # #Vis SMPL's Skeleton
                # gt_smpljointsVis = gt_model_joints[b,:,:3].cpu().numpy()        #[N,49,3]
                # # gt_pelvis = (gt_smpljointsVis[ 25+2,:] + gt_smpljointsVis[ 25+3,:]) / 2
                # # gt_smpljointsVis = gt_smpljointsVis- gt_pelvis
                # gt_smpljointsVis = gt_smpljointsVis.ravel()[:,np.newaxis]
                # gt_smpljointsVis*=pred_camera_vis[b,0]
                # gt_smpljointsVis[::3] += pred_camera_vis[b,1]
                # gt_smpljointsVis[1::3] += pred_camera_vis[b,2]
                # gt_smpljointsVis*=112
                # glViewer.addSkeleton( [gt_smpljointsVis])

                # #Vis GT  joint  (not model (SMPL) joint!!)
                # if has_pose_3d[b]:
                #     gt_jointsVis = gt_model_joints[b,:,:3].cpu().numpy()        #[N,49,3]
                #     # gt_jointsVis = gt_joints[b,:,:3].cpu().numpy()        #[N,49,3]
                #     # gt_pelvis = (gt_jointsVis[ 25+2,:] + gt_jointsVis[ 25+3,:]) / 2
                #     # gt_jointsVis = gt_jointsVis- gt_pelvis

                #     gt_jointsVis = gt_jointsVis.ravel()[:,np.newaxis]
                #     gt_jointsVis*=pred_camera_vis[b,0]
                #     gt_jointsVis[::3] += pred_camera_vis[b,1]
                #     gt_jointsVis[1::3] += pred_camera_vis[b,2]
                #     gt_jointsVis*=112

                #     glViewer.addSkeleton( [gt_jointsVis])
                # # glViewer.show()

                glViewer.setBackgroundTexture(originalImg)
                glViewer.setWindowSize(curImgVis.shape[1], curImgVis.shape[0])
                glViewer.SetOrthoCamera(True)
                glViewer.show(0)

                # continue

        return output, losses
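The visualization code in this example repeats the same weak-perspective mapping for every mesh and skeleton: scale by pred_camera[:, 0], shift X/Y by pred_camera[:, 1:], and multiply by 112 (half of the 224-pixel crop) to reach bbox pixel units. A minimal sketch of that pattern, with an assumed helper name rather than the repository's API:

import numpy as np

def weak_perspective_to_bbox(points, cam, half_res=112.0):
    """Map SMPL-space points (N, 3) into 224x224 bbox pixel coordinates.

    cam is the 3-vector [s, tx, ty] predicted by the network
    (pred_camera[b] in the snippet); depth (Z) is only scaled, never translated.
    """
    out = points.copy()
    out *= cam[0]            # weak-perspective scale s
    out[:, 0] += cam[1]      # translate X (still in normalized [-1, 1] units)
    out[:, 1] += cam[2]      # translate Y
    out *= half_res          # [-1, 1] -> [-112, 112] pixel units (crop res 224)
    return out

# e.g. pred_vert_vis = weak_perspective_to_bbox(pred_vertices[b].detach().cpu().numpy(),
#                                               pred_camera[b].detach().cpu().numpy())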
Code Example #28
0
File: visualizer.py Project: zhly0/eft
    def visualize(self, image, instances, mesh_fname=""):
        """
        """
        pred_camera = self.cam_all[0]
        camParam_scale = pred_camera[0]
        camParam_trans = pred_camera[1:]

        pred_vert_vis = self.vert_all[0]

        # smpl_joints_3d_vis = smpl_joints_3d

        draw_onbbox = True
        draw_rawimg = True

        if draw_onbbox:
            pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale,
                                                 camParam_trans)
            smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis,
                                                      camParam_scale,
                                                      camParam_trans)
            renderer.setBackgroundTexture(croppedImg)
            renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])

            pred_vert_vis *= MAGNIFY_RATIO

        if draw_rawimg:
            #Convert SMPL to BBox first
            pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale,
                                                 camParam_trans)
            smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis,
                                                      camParam_scale,
                                                      camParam_trans)

            #From cropped space to original
            pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n,
                                                  bboxTopLeft, rawImg.shape[1],
                                                  rawImg.shape[0])
            smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis,
                                                       boxScale_o2n,
                                                       bboxTopLeft,
                                                       rawImg.shape[1],
                                                       rawImg.shape[0])
            renderer.setBackgroundTexture(rawImg)
            renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

            #In orthographic model. XY of 3D is just 2D projection
            smpl_joints_2d_vis = conv_3djoint_2djoint(smpl_joints_3d_vis,
                                                      rawImg.shape)
            # image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_smpl45(smpl_joints_2d_vis, image=rawImg.copy(),color=(0,255,255))
            image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(
                smpl_joints_2d_vis, image=rawImg.copy(),
                color=(255, 0, 0))  #All 2D joint
            image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(
                smpl_joints_2d_vis,
                pt2d_visibility=keypoint_2d_validity,
                image=image_2dkeypoint_pred,
                color=(0, 255, 255))  #Only valid
            viewer2D.ImShow(image_2dkeypoint_pred,
                            name='keypoint_2d_pred',
                            waitTime=1)

        pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
        v = pred_meshes['ver']
        f = pred_meshes['f']

        #Visualize in the original image space
        renderer.set_mesh(v, f)
        renderer.showBackground(True)
        renderer.setWorldCenterBySceneCenter()
        renderer.setCameraViewMode("cam")

        #Set image size for rendering
        if args.onbbox:
            renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
        else:
            renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

        renderer.display()
        renderImg = renderer.get_screen_color_ibgr()
        viewer2D.ImShow(renderImg, waitTime=1)
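As the comment above notes, under this orthographic model the 2D keypoints are simply the X/Y components of the image-aligned 3D joints. A possible sketch of conv_3djoint_2djoint under that assumption (the clamping to the image bounds is an assumption, not necessarily what the repository's helper does):

import numpy as np

def conv_3djoint_2djoint_sketch(joints_3d, img_shape):
    """joints_3d: (N, 3) joints already in image pixel space; returns (N, 2)."""
    joints_2d = joints_3d[:, :2].copy()                              # orthographic: drop Z
    joints_2d[:, 0] = np.clip(joints_2d[:, 0], 0, img_shape[1] - 1)  # clamp to image width
    joints_2d[:, 1] = np.clip(joints_2d[:, 1], 0, img_shape[0] - 1)  # clamp to image height
    return joints_2d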
Code Example #29
0
def visEFT_singleSubject(renderer):
    inputDir = args.fit_dir
    imgDir = args.img_dir

    smplModelDir = args.smpl_dir
    smpl = SMPL(smplModelDir, batch_size=1, create_transl=False)

    # outputFolder = os.path.basename(inputDir) + '_dpOut'
    # outputFolder =os.path.join('/run/media/hjoo/disk/data/eftout/',outputFolder)
    
    eft_fileList  = listdir(inputDir)       #Check all fitting files
    print(">> Found {} files in the fitting folder {}".format(len(eft_fileList), inputDir))
    totalCnt = 0
    erroneousCnt = 0

    for idx, f in enumerate(sorted(eft_fileList)):
        totalCnt += 1

        #Load EFT data
        fileFullPath = join(inputDir, f)
        with open(fileFullPath, 'rb') as fp:  #avoid shadowing the loop variable f
            eft_data = pickle.load(fp)

        #Get raw image path
        imgFullPath = eft_data['imageName'][0]
        imgName = os.path.basename(imgFullPath)
        imgFullPath =os.path.join(imgDir, os.path.basename(imgFullPath) )
        assert os.path.exists(imgFullPath)
        rawImg = cv2.imread(imgFullPath)
        print(f'Input image: {imgFullPath}')

        #EFT data
        bbox_scale = eft_data['scale'][0]
        bbox_center = eft_data['center'][0]

        pred_camera = eft_data['pred_camera']
        pred_betas = torch.from_numpy(eft_data['pred_shape'])
        pred_pose_rotmat = torch.from_numpy(eft_data['pred_pose_rotmat'])        

        #COCO only. Annotation index
        print("COCO annotId: {}".format(eft_data['annotId']))

        #Obtain skeleton and smpl data
        smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
        smpl_vertices = smpl_output.vertices.detach().cpu().numpy() 
        smpl_joints_3d = smpl_output.joints.detach().cpu().numpy() 

        #Crop image
        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

        ########################
        # Visualize
        if False:
            #Compute 2D reprojection error
            # if not (data['loss_keypoints_2d']<0.0001 or data['loss_keypoints_2d']>0.001 :
            #     continue
            maxBeta = abs(torch.max( abs(pred_betas)).item())
            if eft_data['loss_keypoints_2d']>0.0005 or maxBeta>3:
                erroneousCnt +=1
            print(">>> loss2d: {}, maxBeta: {}".format( eft_data['loss_keypoints_2d'],maxBeta) )
        
        # Visualize 2D image
        if False:
            viewer2D.ImShow(rawImg, name='rawImg', waitTime=1)      #You should press any key 
            viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

        # Visualization Mesh
        if True:    
            b=0
            camParam_scale = pred_camera[b,0]
            camParam_trans = pred_camera[b,1:]
            pred_vert_vis = smpl_vertices[b]
            smpl_joints_3d_vis = smpl_joints_3d[b]

            if args.onbbox:
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)
                renderer.setBackgroundTexture(croppedImg)
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                #Convert SMPL to BBox first
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 
                smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                renderer.setBackgroundTexture(rawImg)
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

            pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
            v = pred_meshes['ver'] 
            f = pred_meshes['f']

            #Visualize in the original image space
            renderer.set_mesh(v,f)
            renderer.showBackground(True)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("cam")

            renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            renderImg = renderer.get_screen_color_ibgr()
            viewer2D.ImShow(renderImg,waitTime=1)

            # out_all_f = render.get_z_value()

        # Visualization Mesh on side view
        if True:
            # renderer.set_viewpoint()
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("side")

            renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()        #Overwrite on rawImg
            viewer2D.ImShow(sideImg,waitTime=1)
            
            sideImg = cv2.resize(sideImg, (renderImg.shape[1], renderImg.shape[0]) )

        saveImg = np.concatenate( (renderImg,sideImg), axis =1)
        viewer2D.ImShow(saveImg,waitTime=0)

        if True:    #Save the rendered image to files
            if not os.path.exists(args.render_dir):
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{:08d}.jpg'.format(idx)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, saveImg)

    print("erroneous Num : {}/{} ({} percent)".format(erroneousCnt,totalCnt, float(erroneousCnt)*100/totalCnt))
Code Example #30
0
File: pennaction.py Project: facebookresearch/eft
def pennaction_extract(cocoPose3DAll, imgRootDir, out_path):

    # 0head
    # 1left_shoulder
    # 2right_shoulder
    # 3left_elbow
    # 4right_elbow
    # 5left_wrist
    # 6right_wrist
    # 7left_hip
    # 8right_hip
    # 9left_knee
    # 10right_knee
    # 11left_ankle
    # 12right_ankle

    # convert joints to global order
    # joints_idx = [ 18, 9, 8, 10 ,7, 11, 6, 3,2 ,4, 1, 5, 0]       #Left right were flipped...wrong
    joints_idx = [18, 8, 9, 7, 10, 6, 11, 2, 3, 1, 4, 0, 5]

    # bbox expansion factor
    scaleFactor = 1.2

    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_, annot_ids_ = [], [], [], [], [], []
    subjectIds_ = []

    for annot in cocoPose3DAll:
        # keypoints processing
        keypoints = annot['keypoints']
        keypoints[keypoints[:, 2] > 0, 2] = 1

        #disable head
        keypoints[0, 2] = 0

        #Change the following to select a subset of coco
        # if sum(keypoints[5:,2]>0) < 12:   #Original: cases that all body limbs are annotated
        #     continue
        # if sum(keypoints[5:,2]>0) >= 12:   #If all parts are valid. skip. we already have this
        #     continue
        # if sum(keypoints[5:,2]>0) < 6:   #At least 6 joints should be there
        #     continue
        # image name
        img_name_full = annot['imgname']
        img_name_full = os.path.join(
            os.path.basename(os.path.dirname(img_name_full)),
            os.path.basename(img_name_full))

        # keypoints
        part = np.zeros([24, 3])
        part[joints_idx] = keypoints

        # scale and center
        bbox_xyxy = annot['bbox_xyxy']  #x_min, y_min, x_max, y_max
        bbox_xywh = [
            bbox_xyxy[0], bbox_xyxy[1], bbox_xyxy[2] - bbox_xyxy[0],
            bbox_xyxy[3] - bbox_xyxy[1]
        ]
        center = [
            bbox_xywh[0] + bbox_xywh[2] / 2, bbox_xywh[1] + bbox_xywh[3] / 2
        ]
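        # The division by 200 below follows the SPIN/EFT convention of storing the bbox size
        # in units of 200 px (downstream cropping code multiplies scale by 200 to recover it).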
        scale = scaleFactor * max(bbox_xywh[2], bbox_xywh[3]) / 200
        openpose = np.zeros([25, 3])  #blank

        if False:  #visualize
            imgPath = os.path.join(imgRootDir, img_name_full)
            raw_img = cv2.imread(imgPath)
            raw_img = viewer2D.Vis_Skeleton_2D_SPIN24(
                part[:, :2], pt2d_visibility=part[:, 2], image=raw_img)
            # raw_img = viewer2D.Vis_Skeleton_2D_foot(foot_kp[:,:-1], foot_kp[:,-1] , image= raw_img)
            # raw_img = viewer2D.Vis_Skeleton_2D_Openpose25(openpose[:,:-1], openpose[:,-1] , image= raw_img)
            viewer2D.ImShow(raw_img, waitTime=1)

        subjectid = annot['subjectId']
        # subjectid = "{}-id{:03}".format(seqName,trackid)

        # store data
        subjectIds_.append(subjectid)
        imgnames_.append(img_name_full)
        annot_ids_.append(-1)
        centers_.append(center)
        scales_.append(scale)
        parts_.append(part)
        openposes_.append(openpose)

    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, 'pennaction.npz')
    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             subjectIds=subjectIds_,
             openpose=openposes_,
             annotIds=annot_ids_)
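To sanity-check the extracted annotations, the saved pennaction.npz can be reloaded with the same keys that np.savez writes above, for example:

import numpy as np

data = np.load('pennaction.npz', allow_pickle=True)  # allow_pickle only needed if arrays are object-typed
print(data.files)           # ['imgname', 'center', 'scale', 'part', 'subjectIds', 'openpose', 'annotIds']
print(data['part'].shape)   # (num_samples, 24, 3): x, y, visibility in the SPIN 24-joint order
print(data['scale'][:5])    # bbox sizes in units of 200 px (see the comment in the extractor above)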