Example #1
    def _visualize_gui_naive(self, meshList, skelList=None, body_bbox_list=None, img_original=None, normal_compute=True):
        """
            args:
                meshList: list of {'ver': pred_vertices, 'f': smpl.faces}
                skelList: list of [JointNum*3, 1]       (where 1 means num. of frames in glviewer)
                body_bbox_list: list of [x,y,w,h]
        """
        if body_bbox_list is not None:
            for bbr in body_bbox_list:
                viewer2D.Vis_Bbox(img_original, bbr)
        # viewer2D.ImShow(img_original)

        glViewer.setWindowSize(img_original.shape[1], img_original.shape[0])
        # glViewer.setRenderOutputSize(inputImg.shape[1],inputImg.shape[0])
        glViewer.setBackgroundTexture(img_original)
        glViewer.SetOrthoCamera(True)
        glViewer.setMeshData(meshList, bComputeNormal= normal_compute)        # meshes = {'ver': pred_vertices, 'f': smplWrapper.f}

        if skelList is not None:
            glViewer.setSkeleton(skelList)

        if True:   #Save to File
            if True:        #Cam view rendering
                # glViewer.setSaveFolderName(overlaidImageFolder)
                glViewer.setNearPlane(50)
                glViewer.setWindowSize(img_original.shape[1], img_original.shape[0])
                # glViewer.show_SMPL(bSaveToFile = True, bResetSaveImgCnt = False, countImg = False, mode = 'camera')
                glViewer.show(1)

            if False:    #Side view rendering
                # glViewer.setSaveFolderName(sideImageFolder)
                glViewer.setNearPlane(50)
                glViewer.setWindowSize(img_original.shape[1], img_original.shape[0])
                glViewer.show_SMPL(bSaveToFile = True, bResetSaveImgCnt = False, countImg = True, zoom=1108, mode = 'youtube')
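A minimal usage sketch for the example above (hypothetical setup: `visualizer` is assumed to be an instance of the class defining this method, and the placeholder shapes follow the docstring):

    import cv2
    import numpy as np

    img = cv2.imread('input.jpg')                                       # any BGR image
    mesh = {'ver': np.random.rand(6890, 3).astype(np.float32) * 100,   # SMPL-sized placeholder vertices
            'f': np.zeros((13776, 3), dtype=np.int32)}                  # placeholder faces; use smpl.faces in practice
    skel = np.zeros((49 * 3, 1), dtype=np.float32)                      # [JointNum*3, 1], as in the docstring
    visualizer._visualize_gui_naive([mesh], skelList=[skel],
                                    body_bbox_list=[[10, 10, 200, 400]],
                                    img_original=img)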
Example #2
    def _visualize_screenless_naive(self,
                                    meshList,
                                    skelList=None,
                                    body_bbox_list=None,
                                    img_original=None,
                                    vis=False,
                                    maxHeight=1080):
        """
            args:
                meshList: list of {'ver': pred_vertices, 'f': smpl.faces}
                skelList: list of [JointNum*3, 1]       (where 1 means num. of frames in glviewer)
                body_bbox_list: list of [x,y,w,h]
            output:
                #Rendered images are saved in 
                self.renderout['render_camview']
                self.renderout['render_sideview']

            #Note: The size of opengl rendering is restricted by the current screen size. Set the maxHeight accordingly

        """
        assert self.renderer is not None

        if len(meshList) == 0:
            # sideImg = cv2.resize(sideImg, (renderImg.shape[1], renderImg.shape[0]) )
            self.renderout = {}
            self.renderout['render_camview'] = img_original.copy()

            blank = np.ones(img_original.shape,
                            dtype=np.uint8) * 255  #generate blank image
            self.renderout['render_sideview'] = blank

            return

        if body_bbox_list is not None:
            for bbr in body_bbox_list:
                viewer2D.Vis_Bbox(img_original, bbr)
        # viewer2D.ImShow(img_original)

        #Check image height
        imgHeight, imgWidth = img_original.shape[0], img_original.shape[1]
        if maxHeight < imgHeight:  #Resize
            ratio = maxHeight / imgHeight

            #Resize Img
            newWidth = int(imgWidth * ratio)
            newHeight = int(imgHeight * ratio)
            img_original_resized = cv2.resize(img_original,
                                              (newWidth, newHeight))

            #Resize mesh vertices
            for m in meshList:
                m['ver'] *= ratio

            #Resize skeletons
            if skelList is not None:
                for s in skelList:
                    s *= ratio

        else:
            img_original_resized = img_original

        self.renderer.setWindowSize(img_original_resized.shape[1],
                                    img_original_resized.shape[0])
        self.renderer.setBackgroundTexture(img_original_resized)
        self.renderer.setViewportSize(img_original_resized.shape[1],
                                      img_original_resized.shape[0])

        # self.renderer.add_mesh(meshList[0]['ver'],meshList[0]['f'])
        self.renderer.clear_mesh()
        for mesh in meshList:
            self.renderer.add_mesh(mesh['ver'], mesh['f'])
        self.renderer.showBackground(True)
        self.renderer.setWorldCenterBySceneCenter()
        self.renderer.setCameraViewMode("cam")
        # self.renderer.setViewportSize(img_original_resized.shape[1], img_original_resized.shape[0])

        self.renderer.display()
        renderImg = self.renderer.get_screen_color_ibgr()

        if vis:
            viewer2D.ImShow(renderImg, waitTime=1, name="rendered")

        ###Render Side View
        self.renderer.setCameraViewMode("free")
        self.renderer.setViewAngle(90, 20)
        self.renderer.showBackground(False)
        self.renderer.setViewportSize(img_original_resized.shape[1],
                                      img_original_resized.shape[0])
        self.renderer.display()
        sideImg = self.renderer.get_screen_color_ibgr()  #Overwrite on rawImg

        if vis:
            viewer2D.ImShow(sideImg, waitTime=0, name="sideview")

        # sideImg = cv2.resize(sideImg, (renderImg.shape[1], renderImg.shape[0]) )
        self.renderout = {}
        self.renderout['render_camview'] = renderImg
        self.renderout['render_sideview'] = sideImg
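A minimal usage sketch (hypothetical: `visualizer` is assumed to be an instance of this class with its off-screen renderer initialized; input shapes follow the docstring):

    import cv2
    import numpy as np

    img = cv2.imread('input.jpg')
    meshList = [{'ver': np.random.rand(6890, 3).astype(np.float32) * 100,
                 'f': np.zeros((13776, 3), dtype=np.int32)}]     # SMPL-sized placeholders
    skelList = [np.zeros((49 * 3, 1), dtype=np.float32)]

    visualizer._visualize_screenless_naive(meshList, skelList, img_original=img, maxHeight=720)
    cv2.imwrite('camview.jpg', visualizer.renderout['render_camview'])    # overlay on the input image
    cv2.imwrite('sideview.jpg', visualizer.renderout['render_sideview'])  # rendered side view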
Example #3
def RunMonomocap(args, video_path, visualizer, bboxdetector, bodymocap, device, renderOutRoot):

    #Set up output folders
    if renderOutRoot:
        outputFileName = 'scene_%08d.jpg' # Hardcoded in glViewer.py
        os.makedirs(renderOutRoot, exist_ok=True)

        overlaidImageFolder = os.path.join(renderOutRoot, 'overlaid')
        os.makedirs(overlaidImageFolder, exist_ok=True)

        sideImageFolder = os.path.join(renderOutRoot, 'side')
        os.makedirs(sideImageFolder, exist_ok=True)

        mergedImageFolder = os.path.join(renderOutRoot, 'merged')
        os.makedirs(mergedImageFolder, exist_ok=True)

        g_renderDir = os.path.join(renderOutRoot, 'render')
        os.makedirs(g_renderDir, exist_ok=True)

    #Set up input data (images or webcam)
    imageList = []
    loaded_bboxList = None
    cap = None
    if os.path.isdir(video_path):       #If video_path is a dir, load all images in it
        imageList = sorted(os.listdir(video_path))

        if len(imageList) > 0 and imageList[0].endswith('json'):        #Handling bbox dir input
            print("Found that this input folder has bboxes.")
            bboxFiles = imageList
            imageList = []
            loaded_bboxList = []
            for bn in bboxFiles:
                bf = os.path.join(video_path, bn)
                with open(bf,'r') as f:
                    bbox = json.load(f)
                    assert  'imgPath' in bbox and 'bboxes_xywh' in bbox
                    imageList.append(bbox['imgPath'])

                    bboxes_np = [ np.array(d) for d in bbox['bboxes_xywh']]
                    loaded_bboxList.append(bboxes_np)

        else:       #Otherwise, image dir
            imageList = [os.path.join(video_path,f) for f in imageList]
    else:       #Otherwise, open video_path as a video file or stream
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            print(f"Failed in opening video: {video_path}")
            assert False

    now = datetime.now()
    seqName = now.strftime("%d_%m_%Y_%H%M%S")
    print(f"seqName: {seqName}")
    cur_frame = args.startFrame - 1
    while True:
        # print("Start Mocap")
        g_timer.tic()

        cur_frame += 1        #Frame index, starting from 0
        meshList = []
        skelList = []

        if len(imageList)>0:        #If the path is a folder
            if len(imageList)<=cur_frame:
                break
            elif args.endFrame>=0 and cur_frame > args.endFrame:
                break
            else:
                fName = imageList[cur_frame]
                img_original_bgr  = cv2.imread(fName)
        else:       #Input from video capture (cap is not None)
            _, img_original_bgr = cap.read()
            fName = 'scene_{:08d}.pkl'.format(cur_frame)

            if img_original_bgr is None:    #End of video reached
                print("Warning: img_original_bgr == None")
                # cap = cv2.VideoCapture(video_path)
                # ret, camInputFrame = cap.read()
                break   #Stop processing at the end of video

            if not cap.isOpened():
                print(">> Error: Input data is not valid or unavailable.")
                if args.url is not None:
                    print(">> Error: There would be version issues of your OpenCV in handling URL as the input stream")
                    print(">> Suggestion 1: Try to download the video via youtube-dl and put the video path as input")
                    print(">> Suggestion 2: Use --download or --d flag to automatically download and process it")
                    print("")
                assert False

        # Our operations on the frame come here
        # if cap is not None:  #If input from VideoCapture
        # img_original_rgb = cv2.cvtColor(img_original_bgr, cv2.COLOR_BGR2RGB)          #Our model is trained with RGB
        # Display the resulting frame
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        #Check existence of already processed data
        if args.skip and renderOutRoot:
            # viewer2D.ImShow(overlaidImg)
            mergedImgFileName = '{0}/{1}'.format(mergedImageFolder,outputFileName%cur_frame)
            if os.path.exists(mergedImgFileName):
                print(f"Already exists: {mergedImgFileName}")
                continue


        ######################################################
        ## BBox detection

        if loaded_bboxList is not None and len(loaded_bboxList)==len(imageList):
            bboxXYWH_list = loaded_bboxList[cur_frame]
        else:
            bboxXYWH_list = bboxdetector.detectBbox(img_original_bgr)

        if args.bboxout:
            if renderOutRoot is None:
                print("Please set output folder by --out")
                assert False
            else:
                bboxOutFolder = os.path.join(renderOutRoot, 'bbox')
                os.makedirs(bboxOutFolder, exist_ok=True)

                outputFileName_json = os.path.join(bboxOutFolder, os.path.basename(fName)[:-4] + '.json')
                bboxXYWH_list_saved = [[int(dd) for dd in d] for d in bboxXYWH_list]        #Plain ints for JSON serialization
                with open(outputFileName_json, 'w') as fout:
                    json.dump({'imgPath': fName, 'bboxes_xywh': bboxXYWH_list_saved}, fout)


        #Sort the bboxes by size (to keep the ordering as consistent as possible without tracking)
        diaSize = [(x[2]**2 + x[3]**2) for x in bboxXYWH_list]

        idx_big2small = np.argsort(diaSize)[::-1]
        bboxXYWH_list = [bboxXYWH_list[i] for i in idx_big2small]   #sorted, big to small

        if args.single and len(bboxXYWH_list) > 1:
            #Keep only the biggest bbox (the list is already sorted big-to-small)
            bboxXYWH_list = [bboxXYWH_list[0]]        #nparray (1,4)

        g_debug_bboxonly = False
        if g_debug_bboxonly:
            if False:#len(bboxXYWH_list)>0:
                for bbr in bboxXYWH_list:
                    img_original_bgr = viewer2D.Vis_Bbox(img_original_bgr, bbr)
                    viewer2D.ImShow(img_original_bgr)
            g_timer.toc(average =True, bPrint=True,title="DetectionTime")


            # Capture raw videos (to make a sample data)
            viewer2D.ImShow(img_original_bgr)
            mergedImgFileName = '{0}/{1}'.format(mergedImageFolder,outputFileName%cur_frame)
            cv2.imwrite(mergedImgFileName, img_original_bgr)

            continue
        # g_timer.toc(average =True, bPrint=True,title="Detect")
       
        ######################################################
        ## Body Pose Regression

        if len(bboxXYWH_list)>0:

            mocap_out =[]
            # Old format
            # pred_rotmat_all =[]
            # pred_betas_all =[]
            # pred_camera_all =[]
            # pred_vertices_all =[]
            # pred_joints_3d_all =[]
            # bbox_all =[]
            # boxScale_o2n_all =[]
            # bboxTopLeft_all =[]

            for i, bboxXYWH in enumerate(bboxXYWH_list):

                subjectId = seqName + '_{:03d}'.format(i)       #Without tracking, this value is not consistent across frames

                predoutput = bodymocap.regress(img_original_bgr, bboxXYWH)
                if predoutput is None:
                    continue
                pred_vertices_img = predoutput['pred_vertices_img']
                pred_joints_img = predoutput['pred_joints_img']

                tempMesh = {'ver': pred_vertices_img, 'f': bodymocap.smpl.faces}
                meshList.append(tempMesh)
                skelList.append(pred_joints_img.ravel()[:, np.newaxis])  #(49x3, 1)

                if args.pklout:

                    mocap_single = {
                            'parm_pose': predoutput['pred_rotmat'][0],           #(24,3,3)
                            'parm_shape': predoutput['pred_betas'][0],           #(10,)
                            'parm_cam': predoutput['pred_camera'],               #[cam_scale, cam_offset_x, cam_offset_y]
                            'subjectId': subjectId,
                            'pred_vertices_imgspace': predoutput['pred_vertices_img'],  #3D SMPL vertices where X,Y are aligned to the image
                            'pred_joints_imgspace': predoutput['pred_joints_img'],      #3D joints where X,Y are aligned to the image
                            'bbox_xyxy': predoutput['bbox_xyxy'],        #[minX,minY,maxX,maxY]
                            'bbox_topLeft': predoutput['bboxTopLeft'],   #(2,)   #auxiliary data used inside visualization
                            'bbox_scale_o2n': predoutput['boxScale_o2n'],      #scalar #auxiliary data used inside visualization
                            'smpltype': 'smpl',
                            'annotId': -1,
                            'imageName': fName
                        }
                    mocap_out.append(mocap_single)

                    #Old format below
                    # pred_rotmat_all.append(predoutput['pred_rotmat'])
                    # pred_betas_all.append(predoutput['pred_betas'])
                    # pred_camera_all.append(predoutput['pred_camera'])
                    # pred_vertices_all.append(pred_vertices_img)
                    # pred_joints_3d_all.append(pred_joints_img)
                    # bbox_all.append(predoutput['bbox_xyxy'])
                    # bboxTopLeft_all.append(predoutput['bboxTopLeft'])
                    # boxScale_o2n_all.append(predoutput['boxScale_o2n'])
        
            ######################################################
            ## Export to pkl
            if args.pklout and len(mocap_out)>0:

                # Old format below
                # pred_rotmat_all = np.concatenate(pred_rotmat_all,axis=0)
                # pred_betas_all = np.concatenate(pred_betas_all,axis=0)
                # pred_camera_all = np.concatenate(pred_camera_all,axis=0)
                # pred_vertices_all = np.concatenate(pred_vertices_all,axis=0)
                # pred_joints_3d_all = np.concatenate(pred_joints_3d_all,axis=0)
                # # bbox_all = np.concatenate(bbox_all)
                # # bboxTopLeft_all = np.concatenate(bboxTopLeft_all)
                # # boxScale_o2n_all =np.concatenate(boxScale_o2n_all)
                # dataOut = {
                #     'pred_rotmat_all': pred_rotmat_all,
                #     'pred_betas_all': pred_betas_all,
                #     # 'cams_person': pred_camera_all,
                #     'pred_camera_all': pred_camera_all,
                #     'pred_joints_3d_all': pred_joints_3d_all,
                #     # 'verts_person_og':pred_vertices_all,
                #     'pred_vertices_all':pred_vertices_all,
                #     'boxScale_o2n_all': boxScale_o2n_all,
                #     'bboxTopLeft_all': bboxTopLeft_all,
                #     'bbox':bbox_all
                # }
                if renderOutRoot is None:
                    print("Please set output folder by --out")
                    assert False
                else:
                    mocapOutFolder = os.path.join(renderOutRoot, 'mocap')
                    os.makedirs(mocapOutFolder, exist_ok=True)

                    outputFileName_pkl = os.path.join(mocapOutFolder, os.path.basename(fName)[:-4] + '.pkl')
                    with open(outputFileName_pkl, 'wb') as fout:
                        pickle.dump(mocap_out, fout)
        
        # g_timer.toc(average =True, bPrint=True,title="Detect+Regress")
        ######################################################
        ## Visualization

        if not args.noVis:        #Visualize
            # img_original  = img_original_bgr[:,:,[2,1,0]]
            # img_original = np.ascontiguousarray(img_original, dtype=np.uint8)
            assert img_original_bgr.shape[0]>0 and img_original_bgr.shape[1]>0

            #Render output to files            
            if renderOutRoot:
                visualizer.visualize_screenless_naive(meshList, skelList, bboxXYWH_list, img_original_bgr)

                overlaidImg = visualizer.renderout['render_camview']
                overlaidImgFileName = '{0}/{1}'.format(overlaidImageFolder,outputFileName%cur_frame)
                cv2.imwrite(overlaidImgFileName, overlaidImg)

                sideImg = visualizer.renderout['render_sideview']
                sideImgFileName = '{0}/{1}'.format(sideImageFolder,outputFileName%cur_frame)
                cv2.imwrite(sideImgFileName, sideImg)

                if True:    #merged view rendering
                    # overlaidImg_resized = cv2.resize(overlaidImg, (img_original_bgr.shape[1], img_original_bgr.shape[0]))
                    img_original_bgr_resized = cv2.resize(img_original_bgr, (overlaidImg.shape[1], overlaidImg.shape[0]))
                    sideImg_resized = cv2.resize(sideImg, (overlaidImg.shape[1], overlaidImg.shape[0]))
                    mergedImg = np.concatenate( (img_original_bgr_resized, overlaidImg, sideImg_resized), axis=1)
                    viewer2D.ImShow(mergedImg,name="merged")

                    # viewer2D.ImShow(overlaidImg)
                    mergedImgFileName = '{0}/{1}'.format(mergedImageFolder,outputFileName%cur_frame)
                    cv2.imwrite(mergedImgFileName, mergedImg)
                    print(f"Saved to {mergedImgFileName}")

            #Do not save files; just GUI visualization
            else:
                visualizer.visualize_gui_naive(meshList, skelList, bboxXYWH_list, img_original_bgr)
        g_timer.toc(average =True, bPrint=True,title="Detect+Regress+Vis")

    # When everything done, release the capture
    if cap is not None:
        cap.release()
    cv2.destroyAllWindows()

    # Video generation from rendered images
    if not args.noVis and not args.noVideoOut:
        if renderOutRoot and os.path.exists( os.path.join(renderOutRoot, 'merged') ): 
            print(">> Generating video in {}/{}.mp4".format(renderOutRoot,os.path.basename(renderOutRoot) ))
            inputFrameDir = os.path.join(renderOutRoot, 'merged')
            outVideo_fileName = os.path.join(renderOutRoot, os.path.basename(renderOutRoot)+'.mp4')
            ffmpeg_cmd = 'ffmpeg -y -f image2 -framerate 25 -pattern_type glob -i "{0}/*.jpg"  -pix_fmt yuv420p -c:v libx264 -x264opts keyint=25:min-keyint=25:scenecut=-1 -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" {1}'.format(inputFrameDir, outVideo_fileName)
            os.system(ffmpeg_cmd)
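A small sketch of the per-frame bbox JSON that RunMonomocap writes with --bboxout and reads back when the input folder contains such files (paths and boxes are illustrative):

    import json
    import os
    import numpy as np

    os.makedirs('bbox', exist_ok=True)
    record = {'imgPath': 'frames/scene_00000000.jpg',
              'bboxes_xywh': [[120, 80, 210, 430], [400, 95, 180, 390]]}   # [x,y,w,h] per person
    with open('bbox/scene_00000000.json', 'w') as fout:
        json.dump(record, fout)

    # Reading it back mirrors the loader at the top of RunMonomocap
    with open('bbox/scene_00000000.json', 'r') as f:
        bbox = json.load(f)
        assert 'imgPath' in bbox and 'bboxes_xywh' in bbox
        bboxes_np = [np.array(d) for d in bbox['bboxes_xywh']]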
Example #4
def visEFT_singleSubject(renderer):
    inputDir = args.fit_dir
    imgDir = args.img_dir

    smplModelDir = args.smpl_dir
    smpl = SMPL(smplModelDir, batch_size=1, create_transl=False)

    print("Loading coco annotation")
    cocoAnnotDic = loadCOCOAnnot()

    # outputFolder = os.path.basename(inputDir) + '_dpOut'
    # outputFolder =os.path.join('/run/media/hjoo/disk/data/eftout/',outputFolder)
    
    eft_fileList  = listdir(inputDir)       #Check all fitting files
    print(">> Found {} files in the fitting folder {}".format(len(eft_fileList), inputDir))
    totalCnt = 0
    erroneousCnt = 0

    for idx, f in enumerate(sorted(eft_fileList)):

        #Load EFT data
        fileFullPath = join(inputDir, f)
        with open(fileFullPath, 'rb') as fin:       #fin, to avoid shadowing the loop variable f
            eft_data = pickle.load(fin)
        totalCnt += 1       #Count processed samples (used in the final statistics)

        #Get raw image path
        imgFullPath = eft_data['imageName'][0]
        imgName = os.path.basename(imgFullPath)
        imgFullPath =os.path.join(imgDir, os.path.basename(imgFullPath) )
        assert os.path.exists(imgFullPath)
        rawImg = cv2.imread(imgFullPath)
        print(f'Input image: {imgFullPath}')

        #EFT data
        bbox_scale = eft_data['scale'][0]
        bbox_center = eft_data['center'][0]

        pred_camera = eft_data['pred_camera']
        pred_betas = torch.from_numpy(eft_data['pred_shape'])
        pred_pose_rotmat = torch.from_numpy(eft_data['pred_pose_rotmat'])        

        #COCO only. Annotation index
        print("COCO annotId: {}".format(eft_data['annotId']))
        annot = cocoAnnotDic[eft_data['annotId'][0]]
        print(annot['bbox'])

        #Visualize COCO annotation
        annot_keypoint = np.reshape(np.array(annot['keypoints'], dtype=np.float32), (-1,3))     #17,3
        rawImg = viewer2D.Vis_Skeleton_2D_coco(annot_keypoint[:,:2],annot_keypoint[:,2], image=rawImg)
        rawImg = viewer2D.Vis_Bbox(rawImg, annot['bbox'])

        #Obtain skeleton and smpl data
        smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
        smpl_vertices = smpl_output.vertices.detach().cpu().numpy() 
        smpl_joints_3d = smpl_output.joints.detach().cpu().numpy() 

        #Crop image
        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

        ########################
        # Visualize
        if False:
            #Compute 2D reprojection error
            # if not (eft_data['loss_keypoints_2d'] < 0.0001 or eft_data['loss_keypoints_2d'] > 0.001):
            #     continue
            maxBeta = abs(torch.max(abs(pred_betas)).item())
            if eft_data['loss_keypoints_2d'] > 0.0005 or maxBeta > 3:
                erroneousCnt += 1
            print(">>> loss2d: {}, maxBeta: {}".format(eft_data['loss_keypoints_2d'], maxBeta))
        
        # Visualize 2D image
        if True:
            viewer2D.ImShow(rawImg, name='rawImg', waitTime=1)      #waitTime=1: non-blocking display
            viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

        # Visualization Mesh
        if True:    
            b=0
            camParam_scale = pred_camera[b,0]
            camParam_trans = pred_camera[b,1:]
            pred_vert_vis = smpl_vertices[b]
            smpl_joints_3d_vis = smpl_joints_3d[b]

            if args.onbbox:
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)
                renderer.setBackgroundTexture(croppedImg)
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                #Convert SMPL to bbox space first
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 
                smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                renderer.setBackgroundTexture(rawImg)
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

            pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
            v = pred_meshes['ver'] 
            f = pred_meshes['f']

            #Visualize in the original image space
            renderer.showBackground(True)
            renderer.set_mesh(v,f)
            renderer.display()
            out_all_f = renderer.get_screen_color_ibgr()
            # out_all_f = render.get_z_value()
            viewer2D.ImShow(out_all_f,waitTime=0)

            if True:    #Save the rendered image to files
                if os.path.exists(args.render_dir) == False:
                    os.mkdir(args.render_dir)
                render_output_path = args.render_dir + '/render_{:08d}.jpg'.format(idx)
                print(f"Save to {render_output_path}")
                cv2.imwrite(render_output_path, out_all_f*255.0)

    print("erroneous Num : {}/{} ({} percent)".format(erroneousCnt,totalCnt, float(erroneousCnt)*100/totalCnt))
Example #5
def visEFT_singleSubject(renderer):

    bStopForEachSample = args.waitforkeys  #if True, it will wait for any key pressed to move to the next sample

    inputData = args.fit_data
    imgDir = args.img_dir

    #Load SMPL model
    smplModelPath = os.path.join(args.smpl_dir, 'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl')
    smpl = SMPL(smplModelPath, batch_size=1, create_transl=False)

    print("Loading coco annotation from:{}".format(args.cocoAnnotFile))
    assert os.path.exists(args.cocoAnnotFile)
    cocoAnnotDic = loadCOCOAnnot(args.cocoAnnotFile)

    #Load EFT fitting data
    print(f"Loading EFT data from {inputData}")
    if os.path.exists(inputData):
        with open(inputData, 'r') as f:
            eft_data = json.load(f)
            print("EFT data: ver {}".format(eft_data['ver']))
            eft_data_all = eft_data['data']
    else:
        print(f"ERROR:: Cannot find EFT data: {inputData}")
        assert False

    #Visualize each EFT Fitting output
    for idx, eft_data in enumerate(eft_data_all):

        #Get raw image path
        imgFullPath = eft_data['imageName']
        imgName = os.path.basename(imgFullPath)
        imgFullPath = os.path.join(imgDir, imgName)
        if not os.path.exists(imgFullPath):
            print(f"Img path is not valid: {imgFullPath}")
            assert False
        rawImg = cv2.imread(imgFullPath)
        print(f'Input image: {imgFullPath}')

        #EFT data
        bbox_scale = eft_data['bbox_scale']
        bbox_center = eft_data['bbox_center']

        pred_camera = np.array(eft_data['parm_cam'])
        pred_betas = np.reshape(
            np.array(eft_data['parm_shape'], dtype=np.float32),
            (1, 10))  #(1,10)
        pred_betas = torch.from_numpy(pred_betas)

        pred_pose_rotmat = np.reshape(
            np.array(eft_data['parm_pose'], dtype=np.float32),
            (1, 24, 3, 3))  #(1,24,3,3)
        pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)

        keypoint_2d_validity = eft_data['joint_validity_openpose18']

        #COCO only. Annotation index
        print("COCO annotId: {}".format(eft_data['annotId']))
        annot = cocoAnnotDic[eft_data['annotId']]
        print(annot['bbox'])

        ########################
        #Visualize COCO annotation
        annot_keypoint = np.reshape(
            np.array(annot['keypoints'], dtype=np.float32), (-1, 3))  #17,3
        rawImg = viewer2D.Vis_Skeleton_2D_coco(annot_keypoint[:, :2],
                                               annot_keypoint[:, 2],
                                               image=rawImg)
        rawImg = viewer2D.Vis_Bbox(rawImg, annot['bbox'], color=(0, 255, 0))

        #Get SMPL mesh and joints from SMPL parameters
        smpl_output = smpl(betas=pred_betas,
                           body_pose=pred_pose_rotmat[:, 1:],
                           global_orient=pred_pose_rotmat[:, [0]],
                           pose2rot=False)
        smpl_vertices = smpl_output.vertices.detach().cpu().numpy()[0]
        smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()[0]

        #Crop image using cropping information
        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(
            rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES))

        ########################
        # Visualization of EFT
        ########################

        # Visualize 2D image
        if True:
            viewer2D.ImShow(rawImg, name='rawImg',
                            waitTime=1)  #waitTime=1: non-blocking display
            viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

            #Convert bbox_center, bbox_scale --> bbox_xyxy
            bbox_xyxy = conv_bboxinfo_bboxXYXY(bbox_scale, bbox_center)
            img_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg.copy(), bbox_xyxy[:2],
                                                  bbox_xyxy[2:])
            viewer2D.ImShow(img_bbox, name='img_bbox', waitTime=1)

        # Visualization Mesh
        if True:
            camParam_scale = pred_camera[0]
            camParam_trans = pred_camera[1:]
            pred_vert_vis = smpl_vertices
            smpl_joints_3d_vis = smpl_joints_3d

            if True:  #Always render on the cropped bbox image here (originally toggled by args.onbbox)
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis,
                                                     camParam_scale,
                                                     camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(
                    smpl_joints_3d_vis, camParam_scale, camParam_trans)
                renderer.setBackgroundTexture(croppedImg)
                renderer.setViewportSize(croppedImg.shape[1],
                                         croppedImg.shape[0])

            pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
            v = pred_meshes['ver']
            f = pred_meshes['f']

            #Visualize in the original image space
            renderer.set_mesh(v, f)
            renderer.showBackground(True)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("cam")

            renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            renderer.display()
            renderImg = renderer.get_screen_color_ibgr()
            viewer2D.ImShow(renderImg, waitTime=1)

        # Visualization Mesh on side view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("side")

            renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()  #Overwrite on rawImg
            viewer2D.ImShow(sideImg, waitTime=1)

            sideImg = cv2.resize(sideImg,
                                 (renderImg.shape[1], renderImg.shape[0]))

        #Visualize camera view and side view
        saveImg = np.concatenate((renderImg, sideImg), axis=1)

        if bStopForEachSample:
            viewer2D.ImShow(saveImg, waitTime=0)  #waitTime=0: wait for any key press
        else:
            viewer2D.ImShow(saveImg, waitTime=1)

        #Save the rendered image to files
        if False:
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{:08d}.jpg'.format(
                idx)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, saveImg)
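A sketch of the JSON layout this loader expects; the field names are taken from the code above, and all values are illustrative placeholders:

    import json
    import numpy as np

    eft_data = {
        'ver': '0.1',                                # version string printed by the loader
        'data': [{
            'imageName': 'COCO_train2014_000000000001.jpg',
            'bbox_scale': 1.2,
            'bbox_center': [320.0, 240.0],
            'parm_cam': [0.9, 0.0, 0.1],             # [cam_scale, cam_offset_x, cam_offset_y]
            'parm_shape': [0.0] * 10,                # SMPL betas; reshaped to (1,10) above
            'parm_pose': np.eye(3)[None].repeat(24, axis=0).tolist(),   # 24 rotation matrices; reshaped to (1,24,3,3)
            'joint_validity_openpose18': [1] * 18,
            'annotId': 0,                            # illustrative COCO annotation id
        }]
    }
    with open('eft_fit.json', 'w') as fout:
        json.dump(eft_data, fout)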
Example #6
def exportOursToSpin(cocoPose3DAll, out_path):

    scaleFactor = 1.2

    # annotation files (read here for reference; these fields are not used below)
    annot_file = os.path.join('/home/hjoo/codes/SPIN/data', 'train.h5')
    f = h5py.File(annot_file, 'r')
    centers, imgnames, parts, scales = \
        f['center'], f['imgname'], f['part'], f['scale']

    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []

    imgNum = len(cocoPose3DAll)
    totalSampleNum = [
        len(cocoPose3DAll[imgSample]) for imgSample in cocoPose3DAll
    ]
    totalSampleNum = sum(totalSampleNum)

    print("\n\n### ImageNum: {}, SampleNum: {} ###".format(
        imgNum, totalSampleNum))
    for key_imgId, imgSample in sorted(cocoPose3DAll.items()):
        #load image
        fileName_saved = os.path.join('images', imgSample[0]['imgId'])
        # fileName = os.path.basename(imgPathFull)
        # fileName_saved = os.path.join(os.path.basename(os.path.dirname(imgPathFull)), fileName) #start from train2014

        for sample in imgSample:

            validJointNum = np.sum(sample['pose2D_validity'][::2])

            if validJointNum < 4:
                continue

            if np.isnan(sample['pose3DParam']['camScale']):
                continue

            #visualize
            # if True:        #Save BBox
            #     inputImg_2dvis = inputImg.copy()
            #     inputImg_2dvis = viewer2D.Vis_Bbox(inputImg_2dvis,sample['bbr'])
            #     # viewer2D.ImShow(inputImg_2dvis)
            #     imgFilePath = '{0}/{1}.jpg'.format(bboxFolder, fileName)
            #     cv2.imwrite(imgFilePath, inputImg_2dvis)

            if 'bbr' in sample:
                bbox = sample['bbr']
            else:
                #Estimate a bbox from the valid 2D keypoints
                keypoints = np.reshape(sample['pose2D_gt'], (-1, 2))  #(26,2)
                valid = sample['pose2D_validity'][::2]  #(26,)
                valid_keypoints = keypoints[valid]

                min_pt = np.min(valid_keypoints, axis=0)
                max_pt = np.max(valid_keypoints, axis=0)
                bbox = [
                    min_pt[0], min_pt[1], max_pt[0] - min_pt[0],
                    max_pt[1] - min_pt[1]
                ]

            #Debug Visualization
            if False:
                imgPathFull = os.path.join(
                    '/run/media/hjoo/disk/data/mpii_human_pose_v1',
                    fileName_saved)
                inputImg_2dvis = cv2.imread(imgPathFull)
                inputImg_2dvis = viewer2D.Vis_Skeleton_2D_smplCOCO(
                    sample['pose2D_gt'],
                    pt2d_visibility=sample['pose2D_validity'],
                    image=inputImg_2dvis)
                inputImg_2dvis = viewer2D.Vis_Bbox(inputImg_2dvis, bbox)
                viewer2D.ImShow(inputImg_2dvis, waitTime=0)
                # continue

            center = [bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2]
            scale = scaleFactor * max(bbox[2], bbox[3]) / 200

            #Save data
            imgnames_.append(fileName_saved)
            openposes_.append(np.zeros([25, 3]))  #blank
            centers_.append(center)
            scales_.append(scale)
            has_smpl_.append(1)
            poses_.append(sample['pose3DParam']['pose'])  #(72,)
            shapes_.append(sample['pose3DParam']['shape'])  #(10,)

            #2D keypoints (total26 -> SPIN24)
            poseidx_spin24 = [
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 19, 20, 21, 22, 23, 13
            ]  #13 head top
            poseidx_total26 = [
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14, 15, 16, 17, 18, 19
            ]  #19 head top
            part = np.zeros([24, 3])
            gt_skel = np.reshape(sample['pose2D_gt'], (26, -1))
            gt_validity = np.reshape(sample['pose2D_validity'], (26, -1))
            part[poseidx_spin24, :2] = gt_skel[
                poseidx_total26]  #(18,2), totalGT26 order
            part[poseidx_spin24, 2] = 1 * gt_validity[poseidx_total26, 0]
            parts_.append(part)

            #3D joint
            S = np.zeros([24, 4])
            S[poseidx_spin24, :3] = sample['pose3D_pred'][poseidx_total26, :]
            S[poseidx_spin24, 3] = 1

            skel3D_.append(S)

            # keypoints

    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path,
                            'mpi_train_wShape_1537_ep200_exemplar.npz')

    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             openpose=openposes_,
             pose=poses_,
             shape=shapes_,
             has_smpl=has_smpl_,
             S=skel3D_)
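A quick sanity-check sketch: re-load the exported npz and verify the per-field shapes implied by the construction above (N is the number of exported samples; the path assumes out_path='out'):

    import numpy as np

    data = np.load('out/mpi_train_wShape_1537_ep200_exemplar.npz')
    print(data['part'].shape)     # (N, 24, 3): 2D keypoints in SPIN24 order; third column is validity
    print(data['S'].shape)        # (N, 24, 4): 3D joints plus a validity flag
    print(data['pose'].shape)     # (N, 72):    SMPL pose parameters
    print(data['shape'].shape)    # (N, 10):    SMPL betas
    print(data['imgname'].shape)  # (N,)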