Example #1
def visEFT_singleSubject(inputDir, imDir, smplModelDir, bUseSMPLX):
    if bUseSMPLX:
        smpl = SMPLX(smplModelDir, batch_size=1, create_transl=False)
    else:
        smpl = SMPL(smplModelDir, batch_size=1, create_transl=False)
    fileList  = listdir(inputDir)       #Check all fitting files

    print(">> Found {} files in the fitting folder {}".format(len(fileList), inputDir))
    totalCnt =0
    erroneousCnt =0
    # fileList =['00_00_00008422_0.pkl', '00_00_00008422_1731.pkl', '00_00_00008422_3462.pkl']     #debug
    for f in sorted(fileList):
        
        #Load
        fileFullPath = join(inputDir, f)
        with open(fileFullPath, 'rb') as fp:       #use a separate name to avoid shadowing the loop variable f
            dataDict = pickle.load(fp)
        print(f"Loaded :{fileFullPath}")
        if 'imageName' in dataDict.keys():  #If this pkl has only a single instance. Wrap it to handle panoptic outputs where a pkl holds multiple instances
            dataDict = {0:dataDict}

        for jj, k in enumerate(dataDict):
            if jj%50 !=0:
                continue
            data = dataDict[k]
            # print(data['subjectId'])
            # continue
            if 'smpltype' in data:
                if (data['smpltype'] == 'smpl' and bUseSMPLX) or (data['smpltype'] == 'smplx' and not bUseSMPLX):
                    print("SMPL type mismatch error")
                    assert False

            imgFullPathOri = data['imageName'][0]
            imgFullPath = os.path.join(imDir, os.path.basename(imgFullPathOri))


            data['subjectId'] =0 #TODO debug

            fileName = "{}_{}".format(data['subjectId'],  os.path.basename(imgFullPathOri)[:-4])
            if args.bRenderToFiles and os.path.exists(os.path.join(render_dirName, fileName+".jpg")):
                continue

            if True:    #Additional path checking when the default path is not valid
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri ,1)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri,2)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri, 3 )
                
            scale = data['scale'][0]
            center = data['center'][0]

            # print(data['annotId'])
            ours_betas = torch.from_numpy(data['pred_shape'])
            ours_pose_rotmat = torch.from_numpy(data['pred_pose_rotmat'])
            # spin_betas = torch.from_numpy(data['opt_beta'])
            
            #Compute 2D reprojection error
            # if not (data['loss_keypoints_2d']<0.0001 or data['loss_keypoints_2d']>0.001 :
            #     continue
            maxBeta = torch.max(torch.abs(ours_betas)).item()

            if data['loss_keypoints_2d']>0.0005 or maxBeta>3:
                erroneousCnt +=1
            
            print(">>> loss2d: {}, maxBeta: {}".format( data['loss_keypoints_2d'],maxBeta) )

            # spin_pose = torch.from_numpy(data['opt_pose'])
            pred_camera_vis = data['pred_camera']
    
            if os.path.exists(imgFullPath) == False:
                print(imgFullPath)
                assert os.path.exists(imgFullPath)
            rawImg = cv2.imread(imgFullPath)
            print(imgFullPath)

            croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, center, scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

            #Visualize 2D image
            if args.bRenderToFiles ==False:
                viewer2D.ImShow(rawImg, name='rawImg', waitTime=10)      #You should press any key 
                viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=10)

            ours_output = smpl(betas=ours_betas, body_pose=ours_pose_rotmat[:,1:], global_orient=ours_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
            ours_vertices = ours_output.vertices.detach().cpu().numpy() 
            ours_joints_3d = ours_output.joints.detach().cpu().numpy() 

            #Visualize 3D mesh and 3D skeleton in BBox Space
            if True:
                b =0
                camParam_scale = pred_camera_vis[b,0]
                camParam_trans = pred_camera_vis[b,1:]

                ############### Visualize Mesh ############### 
                pred_vert_vis = ours_vertices[b].copy()
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                glViewer.setMeshData([pred_meshes], bComputeNormal= True)

                ################ Visualize Skeletons ############### 
                #Vis pred-SMPL joint
                pred_joints_vis = ours_joints_3d[b,:,:3].copy()     #(N,3)
                pred_joints_vis = convert_smpl_to_bbox(pred_joints_vis, camParam_scale, camParam_trans)
                glViewer.setSkeleton( [pred_joints_vis.ravel()[:,np.newaxis]])

                ################ Other 3D setup############### 
                glViewer.setBackgroundTexture(croppedImg)
                glViewer.setWindowSize(croppedImg.shape[1]*args.windowscale, croppedImg.shape[0]*args.windowscale)
                glViewer.SetOrthoCamera(True)

                print("Press 'q' in the 3D window to go to the next sample")
                glViewer.show(0)
            
            #Visualize 3D mesh and 3D skeleton on original image space
            if True:
                b =0
                camParam_scale = pred_camera_vis[b,0]
                camParam_trans = pred_camera_vis[b,1:]

                ############### Visualize Mesh ############### 
                pred_vert_vis = ours_vertices[b].copy()
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 
                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                glViewer.setMeshData([pred_meshes], bComputeNormal= True)

                # ################ Visualize Skeletons ############### 
                #Vis pred-SMPL joint
                pred_joints_vis = ours_joints_3d[b,:,:3].copy()     #(N,3)
                pred_joints_vis = convert_smpl_to_bbox(pred_joints_vis, camParam_scale, camParam_trans)
                pred_joints_vis = convert_bbox_to_oriIm(pred_joints_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 


                glViewer.setSkeleton( [pred_joints_vis.ravel()[:,np.newaxis]])

                glViewer.setBackgroundTexture(rawImg)
                glViewer.setWindowSize(rawImg.shape[1]*args.magnifyFactor, rawImg.shape[0]*args.magnifyFactor)
                glViewer.SetOrthoCamera(True)

                print("Press 'q' in the 3D window to go to the next sample")

                if args.bRenderToFiles:        #Export rendered files
                    if os.path.exists(render_dirName) == False:     #make an output folder if necessary
                        os.mkdir(render_dirName)

                    # subjId = data['subjectId'][22:24]
                    fileName = "{}_{}".format(data['subjectId'],  os.path.basename(imgFullPathOri)[:-4])

                    # rawImg = cv2.putText(rawImg,data['subjectId'],(100,100), cv2.FONT_HERSHEY_PLAIN, 2, (255,255,0),2)
                    glViewer.render_on_image(render_dirName, fileName, rawImg)
                    print(f"Render to {fileName}")
Example #2
    def init_fn(self):
        self.train_ds = MixedDataset(self.options,
                                     ignore_3d=self.options.ignore_3d,
                                     is_train=True)

        self.model = hmr(config.SMPL_MEAN_PARAMS,
                         pretrained=True).to(self.device)

        if self.options.bExemplarMode:
            lr = 5e-5 * 0.2
        else:
            lr = self.options.lr
        self.optimizer = torch.optim.Adam(
            params=self.model.parameters(),
            #   lr=self.options.lr,
            lr=lr,
            weight_decay=0)

        if self.options.bUseSMPLX:  #SMPL-X model. No change is required for HMR training; SMPL-X ignores hands and other extra parts.
            #SMPL uses 23 body joints, while SMPL-X uses 21, so the last two SMPL joints are ignored automatically
            self.smpl = SMPLX(config.SMPL_MODEL_DIR,
                              batch_size=self.options.batch_size,
                              create_transl=False).to(self.device)
        else:  #Original SMPL
            self.smpl = SMPL(config.SMPL_MODEL_DIR,
                             batch_size=self.options.batch_size,
                             create_transl=False).to(self.device)

        # Per-vertex loss on the shape
        self.criterion_shape = nn.L1Loss().to(self.device)
        # Keypoint (2D and 3D) loss
        # No reduction because confidence weighting needs to be applied
        self.criterion_keypoints = nn.MSELoss(reduction='none').to(self.device)
        # Loss for SMPL parameter regression
        self.criterion_regr = nn.MSELoss().to(self.device)
        self.models_dict = {'model': self.model}
        self.optimizers_dict = {'optimizer': self.optimizer}
        self.focal_length = constants.FOCAL_LENGTH

        # Initialize SMPLify fitting module
        self.smplify = SMPLify(step_size=1e-2,
                               batch_size=self.options.batch_size,
                               num_iters=self.options.num_smplify_iters,
                               focal_length=self.focal_length)
        if self.options.pretrained_checkpoint is not None:
            print(">>> Load Pretrained mode: {}".format(
                self.options.pretrained_checkpoint))
            self.load_pretrained(
                checkpoint_file=self.options.pretrained_checkpoint)
            self.backupModel()

        #This should be called here after loading model
        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            self.model = torch.nn.DataParallel(self.model)  #Failed...

        # Load dictionary of fits
        self.fits_dict = FitsDict(self.options, self.train_ds)

        # Create renderer
        self.renderer = None  # Renderer(focal_length=self.focal_length, img_res=self.options.img_res, faces=self.smpl.faces)

        #debug
        from torchvision.transforms import Normalize
        self.de_normalize_img = Normalize(mean=[
            -constants.IMG_NORM_MEAN[0] / constants.IMG_NORM_STD[0],
            -constants.IMG_NORM_MEAN[1] / constants.IMG_NORM_STD[1],
            -constants.IMG_NORM_MEAN[2] / constants.IMG_NORM_STD[2]
        ],
                                          std=[
                                              1 / constants.IMG_NORM_STD[0],
                                              1 / constants.IMG_NORM_STD[1],
                                              1 / constants.IMG_NORM_STD[2]
                                          ])
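
# --- Note (illustration, not part of the original trainer) ---
# de_normalize_img above inverts torchvision's Normalize so a network-input tensor can be visualized
# again. A self-contained sketch of the same idea; the ImageNet statistics here are assumed to match
# constants.IMG_NORM_MEAN / constants.IMG_NORM_STD.
import numpy as np
import torch
from torchvision.transforms import Normalize

IMG_NORM_MEAN = [0.485, 0.456, 0.406]
IMG_NORM_STD = [0.229, 0.224, 0.225]

de_normalize = Normalize(
    mean=[-m / s for m, s in zip(IMG_NORM_MEAN, IMG_NORM_STD)],
    std=[1.0 / s for s in IMG_NORM_STD])

def denormalize_to_uint8(img_norm: torch.Tensor) -> np.ndarray:
    """Undo normalization on a (3,H,W) tensor and return an HxWx3 uint8 image."""
    img = de_normalize(img_norm).cpu().numpy()
    img = np.transpose(img, (1, 2, 0)) * 255.0   # CHW -> HWC, back to [0, 255]
    return np.clip(img, 0, 255).astype(np.uint8)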
Example #3
def visEFT_multiSubjects(inputDir, imDir, smplModelDir, bUseSMPLX = False):

    if bUseSMPLX:
        smpl = SMPLX(smplModelDir, batch_size=1, create_transl=False)
    else:
        smpl = SMPL(smplModelDir, batch_size=1, create_transl=False)

    fileList  = listdir(inputDir)       #Check all fitting files

    print(">> Found {} files in the fitting folder {}".format(len(fileList), inputDir))
    totalCnt =0
    erroneousCnt =0
    #Merge samples from the same image

    data_perimage ={}
    for f in sorted(fileList):
        
        if "_init" in f:
            continue
        #Load
        imageName = f[:f.rfind('_')]
        if imageName not in data_perimage.keys():
            data_perimage[imageName] =[]

        data_perimage[imageName].append(f)

    for imgName in data_perimage:

        eftFileNames = data_perimage[imgName]

        meshData =[]
        skelData =[]
        for f in eftFileNames:
            fileFullPath = join(inputDir, f)
            with open(fileFullPath, 'rb') as fp:    #use a separate name to avoid shadowing the loop variable f
                data = pickle.load(fp)
            imgFullPathOri = data['imageName'][0]
            imgFullPath = os.path.join(imDir, os.path.basename(imgFullPathOri))

            
            if True:    #Additional path checking when the default path is not valid
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri ,1)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri,2)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri, 3 )

            scale = data['scale'][0]
            center = data['center'][0]

            ours_betas = torch.from_numpy(data['pred_shape'])
            ours_pose_rotmat = torch.from_numpy(data['pred_pose_rotmat'])
            # spin_betas = torch.from_numpy(data['opt_beta'])
            
            #Compute 2D reprojection error
            # if not (data['loss_keypoints_2d']<0.0001 or data['loss_keypoints_2d']>0.001 :
            #     continue
            maxBeta = torch.max(torch.abs(ours_betas)).item()

            if data['loss_keypoints_2d']>0.0005 or maxBeta>3:
                erroneousCnt +=1
            
            print(">>> loss2d: {}, maxBeta: {}".format( data['loss_keypoints_2d'],maxBeta) )

            # spin_pose = torch.from_numpy(data['opt_pose'])
            pred_camera_vis = data['pred_camera']
    
            assert os.path.exists(imgFullPath)
            rawImg = cv2.imread(imgFullPath)
            print(imgFullPath)

            croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, center, scale, (constants.IMG_RES, constants.IMG_RES) )

            #Visualize 2D image
            if args.bRenderToFiles ==False:
                viewer2D.ImShow(rawImg, name='rawImg', waitTime=10)      #You should press any key 
                viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=10)

            if bUseSMPLX:
                ours_output = smpl(betas=ours_betas, body_pose=ours_pose_rotmat[:,1:-2], global_orient=ours_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
                # ours_output = smpl()        #Default test
            else:
                ours_output = smpl(betas=ours_betas, body_pose=ours_pose_rotmat[:,1:], global_orient=ours_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
                # ours_output = smpl()        #Default test
            ours_vertices = ours_output.vertices.detach().cpu().numpy() 
            ours_joints_3d = ours_output.joints.detach().cpu().numpy() 

            if False:   #Debugging
                # ours_vertices = ours_vertices - ours_joints_3d[0,12,:]
                save_mesh_obj(ours_vertices[0], smpl.faces, 'test.obj')

            #Visualize 3D mesh and 3D skeleton on original image space
            if True:
                b =0
                camParam_scale = pred_camera_vis[b,0]
                camParam_trans = pred_camera_vis[b,1:]

                ############### Visualize Mesh ############### 
                pred_vert_vis = ours_vertices[b].copy()
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 
                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                # glViewer.setMeshData([pred_meshes], bComputeNormal= True)

                # ################ Visualize Skeletons ############### 
                #Vis pred-SMPL joint
                # pred_joints_vis = ours_joints_3d[b,-9:,:3].copy()     #(N,3)        #Debugging
                pred_joints_vis = ours_joints_3d[b,:,:3].copy()     #(N,3)
                pred_joints_vis = convert_smpl_to_bbox(pred_joints_vis, camParam_scale, camParam_trans)
                pred_joints_vis = convert_bbox_to_oriIm(pred_joints_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 


                meshData.append(pred_meshes)
                skelData.append(pred_joints_vis.ravel()[:,np.newaxis])
                # glViewer.setSkeleton( [pred_joints_vis.ravel()[:,np.newaxis]])

                glViewer.setBackgroundTexture(rawImg)
                glViewer.setWindowSize(rawImg.shape[1]*args.magnifyFactor, rawImg.shape[0]*args.magnifyFactor)
                glViewer.SetOrthoCamera(True)

                # print("Press 'q' in the 3D window to go to the next sample")
                # glViewer.show(0)
        glViewer.setSkeleton(skelData)
        glViewer.setMeshData(meshData, bComputeNormal= True)

        if args.bRenderToFiles:        #Export rendered files
            if os.path.exists(render_dirName) == False:     #make an output folder if necessary
                os.mkdir(render_dirName)
            fileName = imgFullPathOri[:-4].replace("/","_")
            glViewer.render_on_image(render_dirName, fileName, rawImg)
            print(f"render to {fileName}")

        glViewer.show(args.displaytime)
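
# --- Note (illustration, not part of the original script) ---
# visEFT_multiSubjects maps each mesh from the cropped bbox space back to the original image via
# convert_bbox_to_oriIm(), using the crop scale (boxScale_o2n) and the bbox top-left corner returned by
# crop_bboxInfo(). A rough sketch of that inverse mapping, stated as an assumption about the helper
# (simplified signature, 224-pixel crop assumed) rather than its actual code:
import numpy as np

def convert_bbox_to_oriIm_sketch(points, box_scale_o2n, bbox_topleft, bbox_res=224):
    """Undo the bbox crop: recenter, rescale, and shift points back to original-image pixels."""
    pts = points.copy()
    pts[:, :2] += bbox_res * 0.5               # from centered bbox coords to [0, bbox_res]
    pts /= box_scale_o2n                       # undo the original-to-crop scaling
    pts[:, :2] += np.asarray(bbox_topleft)     # shift by the crop's top-left corner
    return pts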
Example #4
def run_evaluation(model, dataset_name, dataset, result_file,
                   batch_size=32, img_res=224, 
                   num_workers=32, shuffle=False, log_freq=50, bVerbose= True):
    """Run evaluation on the datasets and metrics we report in the paper. """

    print(dataset_name)


    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # # Transfer model to the GPU
    # model.to(device)

    # Load SMPL model
    global g_smpl_neutral, g_smpl_male, g_smpl_female
    if g_smpl_neutral is None:
        g_smpl_neutral = SMPL(config.SMPL_MODEL_DIR,
                            create_transl=False).to(device)
        
        # g_smpl_neutral = SMPLX(config.SMPL_MODEL_DIR,
        #                     create_transl=False).to(device)
                                     
        g_smpl_male = SMPL(config.SMPL_MODEL_DIR,
                        gender='male',
                        create_transl=False).to(device)
        g_smpl_female = SMPL(config.SMPL_MODEL_DIR,
                        gender='female',
                        create_transl=False).to(device)

    smpl_neutral = g_smpl_neutral
    smpl_male = g_smpl_male
    smpl_female = g_smpl_female

    # renderer = PartRenderer()    
    # Regressor for H36m joints
    J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()
    
    save_results = result_file is not None
    # Disable shuffling if you want to save the results
    if save_results:
        shuffle=False
    # Create dataloader for the dataset
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
    
    # Pose metrics
    # MPJPE and Reconstruction error for the non-parametric and parametric shapes
    # mpjpe = np.zeros(len(dataset))
    # recon_err = np.zeros(len(dataset))
    quant_mpjpe = {}#np.zeros(len(dataset))
    quant_recon_err = {}#np.zeros(len(dataset))
    mpjpe = np.zeros(len(dataset))
    recon_err = np.zeros(len(dataset))

    mpjpe_smpl = np.zeros(len(dataset))
    recon_err_smpl = np.zeros(len(dataset))

    # Shape metrics
    # Mean per-vertex error
    shape_err = np.zeros(len(dataset))
    shape_err_smpl = np.zeros(len(dataset))

    # Mask and part metrics
    # Accuracy
    accuracy = 0.
    parts_accuracy = 0.
    # True positive, false positive and false negative
    tp = np.zeros((2,1))
    fp = np.zeros((2,1))
    fn = np.zeros((2,1))
    parts_tp = np.zeros((7,1))
    parts_fp = np.zeros((7,1))
    parts_fn = np.zeros((7,1))
    # Pixel count accumulators
    pixel_count = 0
    parts_pixel_count = 0

    # Store SMPL parameters
    smpl_pose = np.zeros((len(dataset), 72))
    smpl_betas = np.zeros((len(dataset), 10))
    smpl_camera = np.zeros((len(dataset), 3))
    pred_joints = np.zeros((len(dataset), 17, 3))

    eval_pose = False
    eval_masks = False
    eval_parts = False
    # Choose appropriate evaluation for each dataset
    if dataset_name == 'h36m-p1' or dataset_name == 'h36m-p2'  or dataset_name == 'lspet-test' \
                        or dataset_name == '3dpw' or dataset_name == 'coco2014-val-3d-amt'  or dataset_name == 'ochuman-test' \
                        or dataset_name == '3dpw-vibe'  or dataset_name == '3dpw-crop' or dataset_name == '3dpw-headcrop' or dataset_name == 'mpi-inf-3dhp-test':
        eval_pose = True
    elif dataset_name == 'lsp':
        eval_masks = True
        eval_parts = True
        annot_path = config.DATASET_FOLDERS['upi-s1h']

    joint_mapper_h36m = constants.H36M_TO_J17 if dataset_name == 'mpi-inf-3dhp-test' else constants.H36M_TO_J14
    joint_mapper_gt = constants.J24_TO_J17 if dataset_name == 'mpi-inf-3dhp-test' else constants.J24_TO_J14
    # Iterate over the entire dataset
    for step, batch in enumerate(tqdm(data_loader, desc='Eval', total=len(data_loader))):
        # Get ground truth annotations from the batch

        imgName = batch['imgname'][0]
        seqName = os.path.basename ( os.path.dirname(imgName) )

        gt_pose = batch['pose'].to(device)
        gt_betas = batch['betas'].to(device)
        gt_vertices = smpl_neutral(betas=gt_betas, body_pose=gt_pose[:, 3:], global_orient=gt_pose[:, :3]).vertices
        images = batch['img'].to(device)
        gender = batch['gender'].to(device)
        curr_batch_size = images.shape[0]

        # gt_bbox_scale = batch['scale'].cpu().numpy()
        # gt_bbox_center = batch['center'].cpu().numpy()
        
        with torch.no_grad():
            pred_rotmat, pred_betas, pred_camera = model(images)
            pred_output = smpl_neutral(betas=pred_betas, body_pose=pred_rotmat[:,1:], global_orient=pred_rotmat[:,0].unsqueeze(1), pose2rot=False)
            pred_vertices = pred_output.vertices

        if save_results:
            rot_pad = torch.tensor([0,0,1], dtype=torch.float32, device=device).view(1,3,1)
            rotmat = torch.cat((pred_rotmat.view(-1, 3, 3), rot_pad.expand(curr_batch_size * 24, -1, -1)), dim=-1)
            pred_pose = tgm.rotation_matrix_to_angle_axis(rotmat).contiguous().view(-1, 72)
            smpl_pose[step * batch_size:step * batch_size + curr_batch_size, :] = pred_pose.cpu().numpy()
            smpl_betas[step * batch_size:step * batch_size + curr_batch_size, :]  = pred_betas.cpu().numpy()
            smpl_camera[step * batch_size:step * batch_size + curr_batch_size, :]  = pred_camera.cpu().numpy()

    
        # 3D pose evaluation
        if eval_pose:
            # Regressor broadcasting
            J_regressor_batch = J_regressor[None, :].expand(pred_vertices.shape[0], -1, -1).to(device)
            # Get 14 ground truth joints
            if 'h36m' in dataset_name or 'mpi-inf' in dataset_name:
                gt_keypoints_3d = batch['pose_3d'].cuda()
                gt_keypoints_3d = gt_keypoints_3d[:, joint_mapper_gt, :-1]
            # For 3DPW get the 14 common joints from the rendered shape
            else:
                gt_vertices = smpl_male(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices 
                gt_vertices_female = smpl_female(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices 

                if seqName=='val2014':
                    gt_vertices_neutral = smpl_neutral(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices 
                    gt_vertices = gt_vertices_neutral
                else:
                    gt_vertices[gender==1, :, :] = gt_vertices_female[gender==1, :, :]
                gt_keypoints_3d = torch.matmul(J_regressor_batch, gt_vertices)
                gt_pelvis = gt_keypoints_3d[:, [0],:].clone()
                gt_keypoints_3d = gt_keypoints_3d[:, joint_mapper_h36m, :]
                gt_keypoints_3d = gt_keypoints_3d - gt_pelvis             


            # Get 14 predicted joints from the mesh
            pred_keypoints_3d = torch.matmul(J_regressor_batch, pred_vertices)
            if save_results:
                pred_joints[step * batch_size:step * batch_size + curr_batch_size, :, :]  = pred_keypoints_3d.cpu().numpy()
            pred_pelvis = pred_keypoints_3d[:, [0],:].clone()
            pred_keypoints_3d = pred_keypoints_3d[:, joint_mapper_h36m, :]
            pred_keypoints_3d = pred_keypoints_3d - pred_pelvis 

            # Absolute error (MPJPE)
            error = torch.sqrt(((pred_keypoints_3d - gt_keypoints_3d) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()

            error_upper = torch.sqrt(((pred_keypoints_3d - gt_keypoints_3d) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
            # mpjpe[step * batch_size:step * batch_size + curr_batch_size] = error

            # Reconstruction error
            r_error = reconstruction_error(pred_keypoints_3d.cpu().numpy(), gt_keypoints_3d.cpu().numpy(), reduction=None)

            r_error_upper = reconstruction_error(pred_keypoints_3d.cpu().numpy(), gt_keypoints_3d.cpu().numpy(), reduction=None)
            # recon_err[step * batch_size:step * batch_size + curr_batch_size] = r_error

            #Visualize GT vs prediction
            if False:
                from renderer import viewer2D
                from renderer import glViewer
                import humanModelViewer

                gt_cam_param = batch['cam_param'].cpu().numpy()
                pred_cam_param = pred_camera.detach().cpu().numpy()

                batchNum = gt_pose.shape[0]
                for i in range(batchNum):
                    curImgVis = deNormalizeBatchImg(images[i].cpu())
                    viewer2D.ImShow(curImgVis, name='rawIm', scale=1.0)

                    #move mesh to bbox space
                    vert_gt = gt_vertices[i].cpu().numpy()
                    vert_gt = convert_smpl_to_bbox(vert_gt, gt_cam_param[i][0], gt_cam_param[i][1:])

                    vert_pred = pred_vertices[i].cpu().numpy()
                    vert_pred = convert_smpl_to_bbox(vert_pred, pred_cam_param[i][0], pred_cam_param[i][1:])

                    smpl_face = humanModelViewer.GetSMPLFace()
                    # meshes_gt = {'ver': gt_vertices[i].cpu().numpy()*100, 'f': smpl_face, 'color': (255,0,0)}
                    # meshes_pred = {'ver': pred_vertices[i].cpu().numpy()*100, 'f': smpl_face, 'color': (0,255,0)}

                    meshes_gt = {'ver': vert_gt, 'f': smpl_face, 'color': (200,50,50)}
                    meshes_pred = {'ver': vert_pred, 'f': smpl_face, 'color': (50,200,50)}

                    glViewer.setMeshData([meshes_gt, meshes_pred], bComputeNormal= True)
                    glViewer.setBackgroundTexture(curImgVis)       #Vis raw video as background
                    glViewer.setWindowSize(curImgVis.shape[1]*5, curImgVis.shape[0]*5)
                    glViewer.SetOrthoCamera(True)
                    glViewer.show(0)
                    

            for ii, p in enumerate(batch['imgname'][:len(r_error)]):
                seqName = os.path.basename( os.path.dirname(p))
                # quant_mpjpe[step * batch_size:step * batch_size + curr_batch_size] = error
                if seqName not in quant_mpjpe.keys():
                    quant_mpjpe[seqName] = []
                    quant_recon_err[seqName] = []
                
                quant_mpjpe[seqName].append(error[ii]) 
                quant_recon_err[seqName].append(r_error[ii])


            #Visualize GT mesh and Pred Skeleton
            if False:
                from renderer import viewer2D
                from renderer import glViewer
                import humanModelViewer

                gt_keypoints_3d_vis = gt_keypoints_3d.cpu().numpy()
                gt_keypoints_3d_vis = np.reshape(gt_keypoints_3d_vis, (gt_keypoints_3d_vis.shape[0],-1))        #N,14x3
                gt_keypoints_3d_vis = np.swapaxes(gt_keypoints_3d_vis, 0,1) *100

                pred_keypoints_3d_vis = pred_keypoints_3d.cpu().numpy()
                pred_keypoints_3d_vis = np.reshape(pred_keypoints_3d_vis, (pred_keypoints_3d_vis.shape[0],-1))        #N,14x3
                pred_keypoints_3d_vis = np.swapaxes(pred_keypoints_3d_vis, 0,1) *100
                # output_sample = output_sample[ : , np.newaxis]*0.1
                # gt_sample = gt_sample[: , np.newaxis]*0.1
                # (skelNum, dim, frames)
                for f in range(gt_keypoints_3d_vis.shape[1]):
                    glViewer.setSkeleton( [gt_keypoints_3d_vis[:,[f]], pred_keypoints_3d_vis[:,[f]]] ,jointType='smplcoco')#(skelNum, dim, frames)
                    glViewer.show(0)


            # Reconstruction error
            # quant_recon_err[step * batch_size:step * batch_size + curr_batch_size] = r_error

            list_mpjpe = np.hstack([ quant_mpjpe[k] for k in quant_mpjpe])
            list_reconError = np.hstack([ quant_recon_err[k] for k in quant_recon_err])
            if bVerbose:
                print(">>> {} : MPJPE {:.02f} mm, error: {:.02f} mm | Total MPJPE {:.02f} mm, error {:.02f} mm".format(seqName, np.mean(error)*1000, np.mean(r_error)*1000, np.hstack(list_mpjpe).mean()*1000, np.hstack(list_reconError).mean()*1000) )

            # print("MPJPE {}, error: {}".format(np.mean(error)*100, np.mean(r_error)*100))

        # If mask or part evaluation, render the mask and part images
        # NOTE: the PartRenderer above is disabled, so the eval_masks/eval_parts branches below
        # expect `mask` and `parts` from this call and will fail unless it is re-enabled
        # if eval_masks or eval_parts:
        #     mask, parts = renderer(pred_vertices, pred_camera)

        # Mask evaluation (for LSP)
        if eval_masks:
            center = batch['center'].cpu().numpy()
            scale = batch['scale'].cpu().numpy()
            # Dimensions of original image
            orig_shape = batch['orig_shape'].cpu().numpy()
            for i in range(curr_batch_size):
                # After rendering, convert image back to original resolution
                pred_mask = uncrop(mask[i].cpu().numpy(), center[i], scale[i], orig_shape[i]) > 0
                # Load gt mask
                gt_mask = cv2.imread(os.path.join(annot_path, batch['maskname'][i]), 0) > 0
                # Evaluation consistent with the original UP-3D code
                accuracy += (gt_mask == pred_mask).sum()
                pixel_count += np.prod(np.array(gt_mask.shape))
                for c in range(2):
                    cgt = gt_mask == c
                    cpred = pred_mask == c
                    tp[c] += (cgt & cpred).sum()
                    fp[c] +=  (~cgt & cpred).sum()
                    fn[c] +=  (cgt & ~cpred).sum()
                f1 = 2 * tp / (2 * tp + fp + fn)

        # Part evaluation (for LSP)
        if eval_parts:
            center = batch['center'].cpu().numpy()
            scale = batch['scale'].cpu().numpy()
            orig_shape = batch['orig_shape'].cpu().numpy()
            for i in range(curr_batch_size):
                pred_parts = uncrop(parts[i].cpu().numpy().astype(np.uint8), center[i], scale[i], orig_shape[i])
                # Load gt part segmentation
                gt_parts = cv2.imread(os.path.join(annot_path, batch['partname'][i]), 0)
                # Evaluation consistent with the original UP-3D code
                # 6 parts + background
                for c in range(7):
                   cgt = gt_parts == c
                   cpred = pred_parts == c
                   cpred[gt_parts == 255] = 0
                   parts_tp[c] += (cgt & cpred).sum()
                   parts_fp[c] +=  (~cgt & cpred).sum()
                   parts_fn[c] +=  (cgt & ~cpred).sum()
                gt_parts[gt_parts == 255] = 0
                pred_parts[pred_parts == 255] = 0
                parts_f1 = 2 * parts_tp / (2 * parts_tp + parts_fp + parts_fn)
                parts_accuracy += (gt_parts == pred_parts).sum()
                parts_pixel_count += np.prod(np.array(gt_parts.shape))

        # Print intermediate results during evaluation
        if bVerbose:
            if step % log_freq == log_freq - 1:
                if eval_pose:
                    #the mpjpe/recon_err arrays are no longer populated; report from the per-sequence dicts instead
                    print('MPJPE: ' + str(1000 * np.hstack(list_mpjpe).mean()))
                    print('Reconstruction Error: ' + str(1000 * np.hstack(list_reconError).mean()))
                    print()
                if eval_masks:
                    print('Accuracy: ', accuracy / pixel_count)
                    print('F1: ', f1.mean())
                    print()
                if eval_parts:
                    print('Parts Accuracy: ', parts_accuracy / parts_pixel_count)
                    print('Parts F1 (BG): ', parts_f1[[0,1,2,3,4,5,6]].mean())
                    print()

        # if step==3:     #Debug
        #     break
    # Save reconstructions to a file for further processing
    if save_results:
        np.savez(result_file, pred_joints=pred_joints, pose=smpl_pose, betas=smpl_betas, camera=smpl_camera)
    # Print final results during evaluation

    if bVerbose:
        print('*** Final Results ***')
        print()
    

    evalLog ={}

    if eval_pose:
        # if bVerbose:
        #     print('MPJPE: ' + str(1000 * mpjpe.mean()))
        #     print('Reconstruction Error: ' + str(1000 * recon_err.mean()))
        #     print()
        list_mpjpe = np.hstack([ quant_mpjpe[k] for k in quant_mpjpe])
        list_reconError = np.hstack([ quant_recon_err[k] for k in quant_recon_err])

        output_str ='SeqNames; '
        for seq in quant_mpjpe:
            output_str += seq + ';'
        output_str +='\n MPJPE; '
        quant_mpjpe_avg_mm = np.hstack(list_mpjpe).mean()*1000
        output_str += "Avg {:.02f} mm; ".format( quant_mpjpe_avg_mm)
        for seq in quant_mpjpe:
            output_str += '{:.02f}; '.format(1000 * np.hstack(quant_mpjpe[seq]).mean())


        output_str +='\n Recon Error; '
        quant_recon_error_avg_mm = np.hstack(list_reconError).mean()*1000
        output_str +="Avg {:.02f}mm; ".format( quant_recon_error_avg_mm )
        for seq in quant_recon_err:
            output_str += '{:.02f}; '.format(1000 * np.hstack(quant_recon_err[seq]).mean())
        if bVerbose:
            print(output_str)
        else:
            print(">>>  Test on 3DPW: MPJPE: {} | quant_recon_error_avg_mm: {}".format(quant_mpjpe_avg_mm, quant_recon_error_avg_mm) )

        #Save output to dict
        # evalLog['checkpoint']= args.checkpoint
        evalLog['testdb'] = dataset_name
        evalLog['datasize'] = len(data_loader.dataset)
        
        for seq in quant_mpjpe:
            quant_mpjpe[seq] = 1000 * np.hstack(quant_mpjpe[seq]).mean()
        for seq in quant_recon_err:
            quant_recon_err[seq] = 1000 * np.hstack(quant_recon_err[seq]).mean()

        evalLog['quant_mpjpe'] = quant_mpjpe              #MPJPE per sequence (mm)
        evalLog['quant_recon_err']= quant_recon_err   #PA-MPJPE per sequence (mm)
        evalLog['quant_output_logstr']= output_str   #Formatted log string
        

        evalLog['quant_mpjpe_avg_mm'] = quant_mpjpe_avg_mm              #MPJPE
        evalLog['quant_recon_error_avg_mm']= quant_recon_error_avg_mm   #PA-MPJPE
       
        # return quant_mpjpe_avg_mm, quant_recon_error_avg_mm, evalLog
        return evalLog

    if bVerbose:
        if eval_masks:
            print('Accuracy: ', accuracy / pixel_count)
            print('F1: ', f1.mean())
            print()
        if eval_parts:
            print('Parts Accuracy: ', parts_accuracy / parts_pixel_count)
            print('Parts F1 (BG): ', parts_f1[[0,1,2,3,4,5,6]].mean())
            print()

        
    return -1       #Should return something
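
# --- Note (illustration, not part of the original evaluation script) ---
# run_evaluation() reports MPJPE and the Procrustes-aligned error returned by reconstruction_error()
# (PA-MPJPE). A self-contained sketch of that metric is given below for reference; the repository's
# own implementation may differ in details such as batching and reduction handling.
import numpy as np

def similarity_align(S1, S2):
    """Align S1 (N,3) to S2 (N,3) with a similarity transform (scale, rotation, translation)."""
    mu1, mu2 = S1.mean(axis=0), S2.mean(axis=0)
    X1, X2 = S1 - mu1, S2 - mu2
    var1 = (X1 ** 2).sum()
    K = X1.T @ X2                                   # 3x3 cross-covariance
    U, s, Vh = np.linalg.svd(K)
    Z = np.eye(3)
    Z[-1, -1] = np.sign(np.linalg.det(U @ Vh))      # avoid reflections
    R = Vh.T @ Z @ U.T
    scale = np.trace(R @ K) / var1
    t = mu2 - scale * (R @ mu1)
    return scale * S1 @ R.T + t

def pa_mpjpe(pred_joints, gt_joints):
    """Mean per-joint position error after Procrustes alignment (same units as the inputs)."""
    aligned = similarity_align(pred_joints, gt_joints)
    return np.sqrt(((aligned - gt_joints) ** 2).sum(axis=-1)).mean()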
Example #5
def visEFT_singleSubject(renderer):

    bStopForEachSample = args.waitforkeys  #if True, it will wait for any key pressed to move to the next sample

    inputData = args.fit_data
    imgDir = args.img_dir

    #Load SMPL model
    smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
    smpl = SMPL(smplModelPath, batch_size=1, create_transl=False)

    print("Loading coco annotation from:{}".format(args.cocoAnnotFile))
    assert os.path.exists(args.cocoAnnotFile)
    cocoAnnotDic = loadCOCOAnnot(args.cocoAnnotFile)

    #Load EFT fitting data
    print(f"Loading EFT data from {inputData}")
    if os.path.exists(inputData):
        with open(inputData, 'r') as f:
            eft_data = json.load(f)
            print("EFT data: ver {}".format(eft_data['ver']))
            eft_data_all = eft_data['data']
    else:
        print(f"ERROR:: Cannot find EFT data: {inputData}")
        assert False

    #Visualize each EFT Fitting output
    for idx, eft_data in enumerate(eft_data_all):

        #Get raw image path
        imgFullPath = eft_data['imageName']
        imgName = os.path.basename(imgFullPath)
        imgFullPath = os.path.join(imgDir, imgName)
        if os.path.exists(imgFullPath) == False:
            print(f"Img path is not valid: {imgFullPath}")
            assert False
        rawImg = cv2.imread(imgFullPath)
        print(f'Input image: {imgFullPath}')

        #EFT data
        bbox_scale = eft_data['bbox_scale']
        bbox_center = eft_data['bbox_center']

        pred_camera = np.array(eft_data['parm_cam'])
        pred_betas = np.reshape(
            np.array(eft_data['parm_shape'], dtype=np.float32),
            (1, 10))  #(10,)
        pred_betas = torch.from_numpy(pred_betas)

        pred_pose_rotmat = np.reshape(
            np.array(eft_data['parm_pose'], dtype=np.float32),
            (1, 24, 3, 3))  #(24,3,3)
        pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)

        keypoint_2d_validity = eft_data['joint_validity_openpose18']

        #COCO only. Annotation index
        print("COCO annotId: {}".format(eft_data['annotId']))
        annot = cocoAnnotDic[eft_data['annotId']]
        print(annot['bbox'])

        ########################
        #Visualize COCO annotation
        annot_keypoint = np.reshape(
            np.array(annot['keypoints'], dtype=np.float32), (-1, 3))  #17,3
        rawImg = viewer2D.Vis_Skeleton_2D_coco(annot_keypoint[:, :2],
                                               annot_keypoint[:, 2],
                                               image=rawImg)
        rawImg = viewer2D.Vis_Bbox(rawImg, annot['bbox'], color=(0, 255, 0))

        #Get SMPL mesh and joints from SMPL parameters
        smpl_output = smpl(betas=pred_betas,
                           body_pose=pred_pose_rotmat[:, 1:],
                           global_orient=pred_pose_rotmat[:, [0]],
                           pose2rot=False)
        smpl_vertices = smpl_output.vertices.detach().cpu().numpy()[0]
        smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()[0]

        #Crop image using cropping information
        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(
            rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES))

        ########################
        # Visualization of EFT
        ########################

        # Visualize 2D image
        if True:
            viewer2D.ImShow(rawImg, name='rawImg',
                            waitTime=1)  #You should press any key
            viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

            #Convert bbox_center, bbox_scale --> bbox_xyxy
            bbox_xyxy = conv_bboxinfo_bboxXYXY(bbox_scale, bbox_center)
            img_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg.copy(), bbox_xyxy[:2],
                                                  bbox_xyxy[2:])
            viewer2D.ImShow(img_bbox, name='img_bbox', waitTime=1)

        # Visualization Mesh
        if True:
            camParam_scale = pred_camera[0]
            camParam_trans = pred_camera[1:]
            pred_vert_vis = smpl_vertices
            smpl_joints_3d_vis = smpl_joints_3d

            if True:  #args.onbbox:
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis,
                                                     camParam_scale,
                                                     camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(
                    smpl_joints_3d_vis, camParam_scale, camParam_trans)
                renderer.setBackgroundTexture(croppedImg)
                renderer.setViewportSize(croppedImg.shape[1],
                                         croppedImg.shape[0])

            pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
            v = pred_meshes['ver']
            f = pred_meshes['f']

            #Visualize in the original image space
            renderer.set_mesh(v, f)
            renderer.showBackground(True)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("cam")

            renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            renderer.display()
            renderImg = renderer.get_screen_color_ibgr()
            viewer2D.ImShow(renderImg, waitTime=1)

        # Visualization Mesh on side view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("side")

            renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()  #Overwrite on rawImg
            viewer2D.ImShow(sideImg, waitTime=1)

            sideImg = cv2.resize(sideImg,
                                 (renderImg.shape[1], renderImg.shape[0]))

        #Visualize camera view and side view
        saveImg = np.concatenate((renderImg, sideImg), axis=1)

        if bStopForEachSample:
            viewer2D.ImShow(
                saveImg, waitTime=0
            )  #waitTime=0 means that it will wait for any key pressed
        else:
            viewer2D.ImShow(saveImg, waitTime=1)

        #Save the rendered image to files
        if False:
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{:08d}.jpg'.format(
                idx)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, saveImg)
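
# --- Note (illustration, not part of the original script) ---
# The bbox drawn above comes from conv_bboxinfo_bboxXYXY(), which converts the (scale, center)
# annotation convention into min/max pixel corners. A minimal sketch under the common assumption that
# `scale` is the bbox size divided by 200 pixels (the 200-pixel constant is an assumption, not
# confirmed by the code above):
import numpy as np

def bboxinfo_to_xyxy_sketch(scale, center, size_ref=200.0):
    """Convert (scale, center) to [xmin, ymin, xmax, ymax], assuming scale = bbox_size / size_ref."""
    half = 0.5 * scale * size_ref
    cx, cy = center
    return np.array([cx - half, cy - half, cx + half, cy + half])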
Example #6
def visEFT_multiSubjects(renderer):

    bStopForEachSample = args.waitforkeys      #if True, it will wait for any key pressed to move to the next sample
    bShowTurnTable = args.turntable
    
    # inputDir = args.fit_dir
    inputData = args.fit_data
    imgDir = args.img_dir
    smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
    smpl = SMPL(smplModelPath, batch_size=1, create_transl=False)

    if os.path.exists(inputData):
        with open(inputData,'r') as f:
            eft_data = json.load(f)
            print("EFT data: ver {}".format(eft_data['ver']))
            eft_data_all = eft_data['data']
    else:
        print(f"ERROR:: Cannot find EFT data: {inputData}")
        assert False

    #Aggregate all EFT results per image
    eft_perimage ={}
    for idx, eft_data in enumerate(eft_data_all):
        #Load
        imageName = eft_data['imageName']
        if imageName not in eft_perimage.keys():
            eft_perimage[imageName] =[]

        eft_perimage[imageName].append(eft_data)


    for imgName in eft_perimage:
        eft_data_perimage = eft_perimage[imgName]
        
        renderer.clear_mesh()

        for idx,eft_data in enumerate(eft_data_perimage):
            
            #Get raw image path
            imgFullPath = eft_data['imageName']
            imgName = os.path.basename(imgFullPath)
            imgFullPath =os.path.join(imgDir, imgName)
            if os.path.exists(imgFullPath) ==False:
                print(f"Img path is not valid: {imgFullPath}")
                assert False
            rawImg = cv2.imread(imgFullPath)
            print(f'Input image: {imgFullPath}')

            bbox_scale = eft_data['bbox_scale']
            bbox_center = eft_data['bbox_center']

            pred_camera = np.array(eft_data['parm_cam'])
            pred_betas = np.reshape(np.array( eft_data['parm_shape'], dtype=np.float32), (1,10) )     #(10,)
            pred_betas = torch.from_numpy(pred_betas)

            pred_pose_rotmat = np.reshape( np.array( eft_data['parm_pose'], dtype=np.float32), (1,24,3,3)  )        #(24,3,3)
            pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)
        
            # gt_keypoint_2d = np.reshape( np.array(eft_data['gt_keypoint_2d']), (-1,3))    #(49,3)
            keypoint_2d_validity = eft_data['joint_validity_openpose18']

            #COCO only. Annotation index
            print("COCO annotId: {}".format(eft_data['annotId']))

            #Obtain skeleton and smpl data
            smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
            smpl_vertices = smpl_output.vertices.detach().cpu().numpy() 
            smpl_joints_3d = smpl_output.joints.detach().cpu().numpy() 

            #Crop image
            croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg.copy(), bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

            ########################
            # Visualize
            # Visualize 2D image
            if False:
                viewer2D.ImShow(rawImg, name='rawImg', waitTime=1)      #You should press any key 
                viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

            # Visualization Mesh on raw images
            if True:    
                camParam_scale = pred_camera[0]
                camParam_trans = pred_camera[1:]
                pred_vert_vis = smpl_vertices[0]
                smpl_joints_3d_vis = smpl_joints_3d[0]

                if False:#args.onbbox:      #Always in the original image
                    pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                    smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)
                    renderer.setBackgroundTexture(croppedImg)
                    renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
                else:
                    #Convert SMPL to bbox space first
                    pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                    smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)

                    #From cropped space to original
                    pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 
                    smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                    renderer.setBackgroundTexture(rawImg)
                    renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                v = pred_meshes['ver'] 
                f = pred_meshes['f']

                #Visualize in the original image space
                # renderer.set_mesh(v,f)
                renderer.add_mesh(v,f)

        #Render Mesh on the camera view
        renderer.showBackground(True)
        renderer.setWorldCenterBySceneCenter()
        renderer.setCameraViewMode("cam")
        renderer.display()
        overlaid = renderer.get_screen_color_ibgr()        #Overwrite on rawImg
        # viewer2D.ImShow(overlaid,waitTime=1,name="overlaid")

        if bStopForEachSample:
            viewer2D.ImShow(overlaid,waitTime=0,name="overlaid") #waitTime=0 means that it will wait for any key pressed
        else:
            viewer2D.ImShow(overlaid,waitTime=1,name="overlaid")

        #Render Mesh on the rotating view
        if bShowTurnTable:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("free")
            for i in range(90):
                renderer.setViewAngle(i*4,0)
                renderer.display()
                sideImg = renderer.get_screen_color_ibgr()        #Overwrite on rawImg
                viewer2D.ImShow(sideImg,waitTime=1,name="turn_table")
            
        if True:    #Save the rendered image to files
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{}.jpg'.format(imgName)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, overlaid)   #save the rendered overlay, not the raw image
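
# --- Note (illustration, not part of the original script) ---
# Both viewers rely on crop_bboxInfo() to crop the person region and to report the crop scale
# (boxScale_o2n) and top-left corner that are later used to undo the crop. A simplified sketch of that
# behavior is given below (square crop using the same scale*200 bbox convention assumed earlier;
# boundary padding is ignored for brevity, and the real helper may differ):
import cv2
import numpy as np

def crop_bbox_sketch(img, center, scale, out_res=(224, 224), size_ref=200.0):
    """Crop a square region around `center` and resize it to `out_res`."""
    half = 0.5 * scale * size_ref
    x0, y0 = int(center[0] - half), int(center[1] - half)
    x1, y1 = int(center[0] + half), int(center[1] + half)
    crop = img[max(y0, 0):min(y1, img.shape[0]), max(x0, 0):min(x1, img.shape[1])]
    box_scale_o2n = out_res[0] / float(x1 - x0)            # original-to-crop scaling factor
    cropped = cv2.resize(crop, out_res)
    return cropped, box_scale_o2n, np.array([x0, y0])      # crop, scale ratio, bbox top-left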
Example #7
def visEFT_singleSubject(renderer):

    MAGNIFY_RATIO = 3  #onbbox only. To magnify the rendered image size

    bStopForEachSample = args.waitforkeys  #if True, it will wait for any key pressed to move to the next sample
    bShowTurnTable = args.turntable

    inputData = args.fit_data
    imgDir = args.img_dir

    #Load SMPL model
    smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
    smpl = SMPL(smplModelPath, batch_size=1, create_transl=False)

    #Load EFT fitting data
    print(f"Loading EFT data from {inputData}")
    if os.path.exists(inputData):
        with open(inputData, 'r') as f:
            eft_data = json.load(f)
            print("EFT data: ver {}".format(eft_data['ver']))
            eft_data_all = eft_data['data']
    else:
        print(f"ERROR:: Cannot find EFT data: {inputData}")
        assert False

    #Visualize each EFT Fitting output
    for idx, eft_data in enumerate(tqdm(eft_data_all)):

        #Get raw image path
        imgFullPath = eft_data['imageName']
        # imgName = os.path.basename(imgFullPath)
        imgName = imgFullPath
        imgFullPath = os.path.join(imgDir, imgName)
        if os.path.exists(imgFullPath) == False:
            print(f"Img path is not valid: {imgFullPath}")
            assert False
        rawImg = cv2.imread(imgFullPath)
        print(f'Input image: {imgFullPath}')

        #EFT data
        bbox_scale = eft_data['bbox_scale']
        bbox_center = eft_data['bbox_center']

        pred_camera = np.array(eft_data['parm_cam'])
        pred_betas = np.reshape(
            np.array(eft_data['parm_shape'], dtype=np.float32),
            (1, 10))  #(10,)
        pred_betas = torch.from_numpy(pred_betas)

        pred_pose_rotmat = np.reshape(
            np.array(eft_data['parm_pose'], dtype=np.float32),
            (1, 24, 3, 3))  #(24,3,3)
        pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)

        keypoint_2d_validity = eft_data['joint_validity_openpose18']

        #COCO only. Annotation index
        if 'annotId' in eft_data.keys():
            print("COCO annotId: {}".format(eft_data['annotId']))

        #Get SMPL mesh and joints from SMPL parameters
        smpl_output = smpl(betas=pred_betas,
                           body_pose=pred_pose_rotmat[:, 1:],
                           global_orient=pred_pose_rotmat[:, [0]],
                           pose2rot=False)
        smpl_vertices = smpl_output.vertices.detach().cpu().numpy()[0]
        smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()[0]

        #Crop image using cropping information
        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(
            rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES))

        if MAGNIFY_RATIO > 1:
            croppedImg = cv2.resize(croppedImg,
                                    (croppedImg.shape[1] * MAGNIFY_RATIO,
                                     croppedImg.shape[0] * MAGNIFY_RATIO))

        ########################
        # Visualization
        ########################

        # Visualize 2D image
        if True:
            viewer2D.ImShow(rawImg, name='rawImg',
                            waitTime=1)  #You should press any key
            viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

            #Convert bbox_center, bbox_scale --> bbox_xyxy
            bbox_xyxy = conv_bboxinfo_bboxXYXY(bbox_scale, bbox_center)
            img_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg.copy(), bbox_xyxy[:2],
                                                  bbox_xyxy[2:])
            viewer2D.ImShow(img_bbox, name='img_bbox', waitTime=1)

        # Visualization Mesh
        if True:
            camParam_scale = pred_camera[0]
            camParam_trans = pred_camera[1:]
            pred_vert_vis = smpl_vertices
            smpl_joints_3d_vis = smpl_joints_3d

            if args.onbbox:
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis,
                                                     camParam_scale,
                                                     camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(
                    smpl_joints_3d_vis, camParam_scale, camParam_trans)
                renderer.setBackgroundTexture(croppedImg)
                renderer.setViewportSize(croppedImg.shape[1],
                                         croppedImg.shape[0])

                pred_vert_vis *= MAGNIFY_RATIO
            else:
                #Convert SMPL to bbox space first
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis,
                                                     camParam_scale,
                                                     camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(
                    smpl_joints_3d_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis,
                                                      boxScale_o2n,
                                                      bboxTopLeft,
                                                      rawImg.shape[1],
                                                      rawImg.shape[0])
                smpl_joints_3d_vis = convert_bbox_to_oriIm(
                    smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft,
                    rawImg.shape[1], rawImg.shape[0])
                renderer.setBackgroundTexture(rawImg)
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

                #In the orthographic camera model, the XY of the 3D points is directly the 2D projection
                smpl_joints_2d_vis = conv_3djoint_2djoint(
                    smpl_joints_3d_vis, rawImg.shape)
                # image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_smpl45(smpl_joints_2d_vis, image=rawImg.copy(),color=(0,255,255))
                image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(
                    smpl_joints_2d_vis, image=rawImg.copy(),
                    color=(255, 0, 0))  #All 2D joint
                image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(
                    smpl_joints_2d_vis,
                    pt2d_visibility=keypoint_2d_validity,
                    image=image_2dkeypoint_pred,
                    color=(0, 255, 255))  #Only valid
                viewer2D.ImShow(image_2dkeypoint_pred,
                                name='keypoint_2d_pred',
                                waitTime=1)

            pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
            v = pred_meshes['ver']
            f = pred_meshes['f']

            #Visualize in the original image space
            renderer.set_mesh(v, f)
            renderer.showBackground(True)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("cam")

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1],
                                         croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

            renderer.display()
            renderImg = renderer.get_screen_color_ibgr()
            viewer2D.ImShow(renderImg, waitTime=1)

        # Visualization Mesh on side view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            # renderer.setCameraViewMode("side")    #To show the object in side vie
            renderer.setCameraViewMode("free")
            renderer.setViewAngle(90, 20)

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1],
                                         croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()  #Overwrite on rawImg
            viewer2D.ImShow(sideImg, waitTime=1)

            sideImg = cv2.resize(sideImg,
                                 (renderImg.shape[1], renderImg.shape[0]))
            # renderImg = cv2.resize(renderImg, (sideImg.shape[1], sideImg.shape[0]) )

        # Visualization Mesh on side view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            # renderer.setCameraViewMode("side")    #To show the object in side vie
            renderer.setCameraViewMode("free")
            renderer.setViewAngle(-60, 50)

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1],
                                         croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            sideImg_2 = renderer.get_screen_color_ibgr()  #Overwrite on rawImg
            viewer2D.ImShow(sideImg_2, waitTime=1)

            sideImg_2 = cv2.resize(sideImg_2,
                                   (renderImg.shape[1], renderImg.shape[0]))
            # renderImg = cv2.resize(renderImg, (sideImg.shape[1], sideImg.shape[0]) )

        #Visualize camera view and side view
        saveImg = np.concatenate((renderImg, sideImg), axis=1)
        # saveImg = np.concatenate( (croppedImg, renderImg,sideImg, sideImg_2), axis =1)

        if bStopForEachSample:
            viewer2D.ImShow(
                saveImg, waitTime=0
            )  #waitTime=0 means that it will wait for any key pressed
        else:
            viewer2D.ImShow(saveImg, waitTime=1)

        #Render Mesh on the rotating view
        if bShowTurnTable:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("free")
            for i in range(90):
                renderer.setViewAngle(i * 4, 0)
                renderer.display()
                sideImg = renderer.get_screen_color_ibgr()  #Overwrite on rawImg
                viewer2D.ImShow(sideImg, waitTime=1, name="turn_table")

                if False:  #If you want to save this into files
                    render_output_path = args.render_dir + '/turntable_{}_{:08d}.jpg'.format(
                        os.path.basename(imgName), i)
                    cv2.imwrite(render_output_path, sideImg)

        #Save the rendered image to files
        if True:
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{}_eft{:08d}.jpg'.format(
                imgName[:-4], idx)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, saveImg)
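
# --- Note (illustration, not part of the original script) ---
# As the comment in the code above notes, once the joints are in original-image pixel coordinates, the
# orthographic (weak-perspective) camera makes 2D projection trivial: the XY components are already the
# 2D keypoints. A sketch of what conv_3djoint_2djoint() is assumed to do (illustrative only; clamping
# to the image bounds is an added convenience):
import numpy as np

def project_ortho_sketch(joints3d_imgspace, img_shape):
    """Drop Z: under an orthographic camera, image-space XY are the 2D joints."""
    joints2d = joints3d_imgspace[:, :2].copy()
    h, w = img_shape[:2]
    joints2d[:, 0] = np.clip(joints2d[:, 0], 0, w - 1)
    joints2d[:, 1] = np.clip(joints2d[:, 1], 0, h - 1)
    return joints2d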