def getSMPLoutput_imgSpace(smpl, pred_output, bboxCenter, bboxScale, imgShape, bUseSMPLX=False):
    """ From prediction output, obtain smpl mesh and joint
        Aditionally, converting smpl output (vert and joint) to original image space

        TODO: Currently just assume single batch

        Input:
            smpl: SMPL (or SMPL-X) model instance, forwarded to getSMPLoutput_bboxSpace
            pred_output['pred_shape']
            pred_output['pred_rotmat']
            pred_output['pred_camera']
            bboxCenter, bboxScale: bbox info; may be batched (first sample is used)
            imgShape: original image shape, (height, width, ...)
            bUseSMPLX: True to treat the model as SMPL-X

        Output:
            (smpl_output, smpl_output_bbox, smpl_output_imgspace):
            raw SMPL output, bbox-space outputs, and the same outputs mapped
            to the original image space.
    """
    smpl_output, smpl_output_bbox = getSMPLoutput_bboxSpace(smpl, pred_output, bUseSMPLX)

    #Bbox space to image space
    # If bbox info comes in batched (2D), keep only the first sample
    if len(bboxScale.shape) == 2:
        bboxScale = bboxScale[0]
        bboxCenter = bboxCenter[0]
    bboxScale_o2n, bboxTopLeft_inOriginal = conv_bboxinfo_center2topleft(bboxScale, bboxCenter)

    smpl_output_imgspace = {}
    for k in smpl_output_bbox.keys():
        if "mesh" in k:
            # Mesh entries are dicts with vertices ('ver') and faces ('f');
            # only the vertices need the bbox -> original-image transform.
            mesh_data = smpl_output_bbox[k]
            newMesh = {}
            newMesh['f'] = mesh_data['f']
            newMesh['ver'] = convert_bbox_to_oriIm(mesh_data['ver'].copy(), bboxScale_o2n, bboxTopLeft_inOriginal, imgShape[1], imgShape[0])    #2D bbox -> original 2D image
            smpl_output_imgspace[k] = newMesh
        else:
            # Non-mesh entries are point arrays (e.g. joints)
            data3D = smpl_output_bbox[k]
            # NOTE(review): presumably flattens a (N,1,3)-like array to (N,3)
            # before conversion — confirm against getSMPLoutput_bboxSpace output.
            if data3D.shape[1] == 1:
                data3D = np.reshape(data3D, (-1, 3))
            smpl_output_imgspace[k] = convert_bbox_to_oriIm(data3D, bboxScale_o2n, bboxTopLeft_inOriginal, imgShape[1], imgShape[0])    #2D bbox -> original 2D image

    return smpl_output, smpl_output_bbox, smpl_output_imgspace
def visEFT_singleSubject(renderer):
    """Visualize EFT fitting results (one subject per pkl file).

    Reads fitting pkl files from args.fit_dir, loads the matching raw image
    from args.img_dir, regenerates the SMPL mesh from the saved parameters,
    and renders the mesh over the original image plus a side view. The
    concatenated result is saved into args.render_dir.

    Args:
        renderer: mesh renderer providing set_mesh/display/get_screen_color_ibgr etc.

    Bug fixed: totalCnt was never incremented, so the final summary print
    always raised ZeroDivisionError. totalCnt now counts processed samples
    and the division is guarded for the empty-folder case.
    """
    inputDir = args.fit_dir
    imgDir = args.img_dir
    smplModelDir = args.smpl_dir
    smpl = SMPL(smplModelDir, batch_size=1, create_transl=False)

    eft_fileList = listdir(inputDir)        #Check all fitting files
    print(">> Found {} files in the fitting folder {}".format(len(eft_fileList), inputDir))

    totalCnt =0
    erroneousCnt =0
    for idx, f in enumerate(sorted(eft_fileList)):

        #Load EFT data
        fileFullPath = join(inputDir, f)
        with open(fileFullPath,'rb') as fin:        #renamed from 'f' to avoid shadowing the loop variable
            eft_data = pickle.load(fin)
        totalCnt += 1       #fix: count every processed sample (was never incremented)

        #Get raw image path
        imgFullPath = eft_data['imageName'][0]
        imgName = os.path.basename(imgFullPath)
        imgFullPath =os.path.join(imgDir, os.path.basename(imgFullPath) )
        assert os.path.exists(imgFullPath)
        rawImg = cv2.imread(imgFullPath)
        print(f'Input image: {imgFullPath}')

        #EFT data
        bbox_scale = eft_data['scale'][0]
        bbox_center = eft_data['center'][0]
        pred_camera = eft_data['pred_camera']
        pred_betas = torch.from_numpy(eft_data['pred_shape'])
        pred_pose_rotmat = torch.from_numpy(eft_data['pred_pose_rotmat'])

        #COCO only. Annotation index
        print("COCO annotId: {}".format(eft_data['annotId']))

        #Obtain skeleton and smpl data
        smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
        smpl_vertices = smpl_output.vertices.detach().cpu().numpy()
        smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()

        #Crop image
        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

        ########################
        # Visualize
        if False:    #Compute 2D reprojection error (disabled diagnostic)
            maxBeta = abs(torch.max( abs(pred_betas)).item())
            if eft_data['loss_keypoints_2d']>0.0005 or maxBeta>3:
                erroneousCnt +=1
            print(">>> loss2d: {}, maxBeta: {}".format( eft_data['loss_keypoints_2d'],maxBeta) )

        # Visualize 2D image
        if False:
            viewer2D.ImShow(rawImg, name='rawImg', waitTime=1)      #You should press any key
            viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

        # Visualization Mesh
        if True:
            b=0
            camParam_scale = pred_camera[b,0]
            camParam_trans = pred_camera[b,1:]
            pred_vert_vis = smpl_vertices[b]
            smpl_joints_3d_vis = smpl_joints_3d[b]

            if args.onbbox:
                # Keep everything in bbox (cropped) space
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)
                renderer.setBackgroundTexture(croppedImg)
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                #Covert SMPL to BBox first
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])

                renderer.setBackgroundTexture(rawImg)
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

            pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
            v = pred_meshes['ver']
            f = pred_meshes['f']

            #Visualize in the original image space
            renderer.set_mesh(v,f)
            renderer.showBackground(True)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("cam")
            renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            renderImg = renderer.get_screen_color_ibgr()
            viewer2D.ImShow(renderImg,waitTime=1)

        # Visualization Mesh on side view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("side")
            renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()        #Overwite on rawImg
            viewer2D.ImShow(sideImg,waitTime=1)
            sideImg = cv2.resize(sideImg, (renderImg.shape[1], renderImg.shape[0]) )

        # Camera view and side view, side by side
        saveImg = np.concatenate( (renderImg,sideImg), axis =1)
        viewer2D.ImShow(saveImg,waitTime=0)

        if True:    #Save the rendered image to files
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{:08d}.jpg'.format(idx)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, saveImg)

    # Guarded division (max(totalCnt,1)) so an empty folder no longer crashes
    print("erroneous Num : {}/{} ({} percent)".format(erroneousCnt, totalCnt, float(erroneousCnt)*100/max(totalCnt, 1)))
def visEFT_multiSubjects(renderer):
    """Visualize EFT fitting results with multiple subjects per image.

    Groups the pkl files in args.fit_dir by image name (the file-name prefix
    before the last '_'), re-runs SMPL for every subject of an image, adds all
    meshes to the renderer, renders them together over the raw image (camera
    view, then a 360-degree free-view sweep), and writes an output per image
    into args.render_dir.
    """
    inputDir = args.fit_dir
    imgDir = args.img_dir
    smplModelDir = args.smpl_dir
    smpl = SMPL(smplModelDir, batch_size=1, create_transl=False)

    eft_fileList = listdir(inputDir)        #Check all fitting files
    print(">> Found {} files in the fitting folder {}".format(len(eft_fileList), inputDir))

    #Aggregate all efl per image
    eft_perimage ={}
    for f in sorted(eft_fileList):
        #Load
        imageName = f[:f.rfind('_')]        # file-name convention: <imageName>_<suffix>.pkl
        if imageName not in eft_perimage.keys():
            eft_perimage[imageName] =[]
        eft_perimage[imageName].append(f)

    for imgName in eft_perimage:
        eftFiles_perimage = eft_perimage[imgName]
        renderer.clear_mesh()       # one accumulated render pass per image

        for idx,f in enumerate(eftFiles_perimage):

            #Load EFT data
            fileFullPath = join(inputDir, f)
            with open(fileFullPath,'rb') as f:
                eft_data = pickle.load(f)

            #Get raw image path (shared by all subjects; load once per image)
            if idx==0:
                imgFullPath = eft_data['imageName'][0]
                imgFullPath =os.path.join(imgDir, os.path.basename(imgFullPath) )
                assert os.path.exists(imgFullPath)
                rawImg = cv2.imread(imgFullPath)
                print(f'Input image: {imgFullPath}')

            #EFT data
            bbox_scale = eft_data['scale'][0]
            bbox_center = eft_data['center'][0]
            pred_camera = eft_data['pred_camera']
            pred_betas = torch.from_numpy(eft_data['pred_shape'])
            pred_pose_rotmat = torch.from_numpy(eft_data['pred_pose_rotmat'])

            #COCO only. Annotation index
            print("COCO annotId: {}".format(eft_data['annotId']))

            #Obtain skeleton and smpl data
            smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
            smpl_vertices = smpl_output.vertices.detach().cpu().numpy()
            smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()

            #Crop image
            croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg.copy(), bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

            ########################
            # Visualize
            # Visualize 2D image
            if False:
                viewer2D.ImShow(rawImg, name='rawImg', waitTime=1)      #You should press any key
                viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=0)

            # Visualization Mesh on raw images
            if True:
                b=0
                camParam_scale = pred_camera[b,0]
                camParam_trans = pred_camera[b,1:]
                pred_vert_vis = smpl_vertices[b]
                smpl_joints_3d_vis = smpl_joints_3d[b]

                if False:#args.onbbox:      #Always in the original image
                    pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                    smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)
                    renderer.setBackgroundTexture(croppedImg)
                    renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
                else:
                    #Covert SMPL to BBox first
                    pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                    smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)

                    #From cropped space to original
                    pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                    smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                    renderer.setBackgroundTexture(rawImg)
                    renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                v = pred_meshes['ver']
                f = pred_meshes['f']

                #Visualize in the original image space
                # renderer.set_mesh(v,f)
                renderer.add_mesh(v,f)      # accumulate: one mesh per subject

        #Render Mesh on the camera view
        renderer.showBackground(True)
        renderer.setWorldCenterBySceneCenter()
        renderer.setCameraViewMode("cam")
        renderer.display()
        overlaid = renderer.get_screen_color_ibgr()        #Overwite on rawImg
        viewer2D.ImShow(overlaid,waitTime=1,name="overlaid")

        #Render Mesh on the rotating view
        renderer.showBackground(False)
        renderer.setWorldCenterBySceneCenter()
        renderer.setCameraViewMode("free")
        for i in range(90):
            renderer.setViewAngle(i*4,0)
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()        #Overwite on rawImg
            viewer2D.ImShow(sideImg,waitTime=1,name="otherviews")

        if True:    #Save the rendered image to files
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{}.jpg'.format(imgName)
            print(f"Save to {render_output_path}")
            # NOTE(review): this writes the raw input image, not the rendered
            # 'overlaid' result — confirm whether saving 'overlaid' was intended.
            cv2.imwrite(render_output_path, rawImg)
def visEFT_singleSubject(inputDir, imDir, smplModelDir, bUseSMPLX):
    """Visualize EFT fitting results one subject at a time using glViewer.

    Loads fitting pkl files from inputDir (a pkl may hold one instance, or a
    dict of many instances as produced for panoptic output), regenerates the
    SMPL/SMPL-X mesh, and shows it first in bbox space, then on the original
    image. When args.bRenderToFiles is set, results are rendered into the
    module-level render_dirName folder instead of requiring key presses.

    Args:
        inputDir: folder of EFT fitting pkl files.
        imDir: root folder of the raw images.
        smplModelDir: SMPL/SMPL-X model folder.
        bUseSMPLX: True to load SMPLX, False for SMPL.
    """
    if bUseSMPLX:
        smpl = SMPLX(smplModelDir, batch_size=1, create_transl=False)
    else:
        smpl = SMPL(smplModelDir, batch_size=1, create_transl=False)

    fileList = listdir(inputDir)        #Check all fitting files
    print(">> Found {} files in the fitting folder {}".format(len(fileList), inputDir))
    totalCnt =0         # NOTE(review): incremented nowhere in this function
    erroneousCnt =0

    # fileList =['00_00_00008422_0.pkl', '00_00_00008422_1731.pkl', '00_00_00008422_3462.pkl']        #debug

    for f in sorted(fileList):

        #Load
        fileFullPath = join(inputDir, f)
        with open(fileFullPath,'rb') as f:
            dataDict = pickle.load(f)
        print(f"Loaded :{fileFullPath}")

        #If this pkl has only one instance. Made this to hand panoptic output where pkl has multi instances
        if 'imageName' in dataDict.keys():
            dataDict = {0:dataDict}

        for jj, k in enumerate(dataDict):
            # Subsample: only every 50th instance is visualized
            if jj%50 !=0:
                continue
            data = dataDict[k]
            # print(data['subjectId'])
            # continue

            # Abort if the saved model type disagrees with bUseSMPLX
            if 'smpltype' in data:
                if (data['smpltype'] =='smpl' and bUseSMPLX) or (data['smpltype'] =='smplx' and bUseSMPLX==False):
                    print("SMPL type mismatch error")
                    assert False

            imgFullPathOri = data['imageName'][0]
            imgFullPath = os.path.join(imDir, os.path.basename(imgFullPathOri))

            data['subjectId'] =0        #TODO debug
            fileName = "{}_{}".format(data['subjectId'], os.path.basename(imgFullPathOri)[:-4])
            # Skip samples already rendered to file
            if args.bRenderToFiles and os.path.exists(os.path.join(render_dirName, fileName+".jpg")):
                continue

            if True:        #Additional path checking, if not valid: retry with 1-3 path levels preserved
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri ,1)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri,2)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri, 3 )

            scale = data['scale'][0]
            center = data['center'][0]
            # print(data['annotId'])

            ours_betas = torch.from_numpy(data['pred_shape'])
            ours_pose_rotmat = torch.from_numpy(data['pred_pose_rotmat'])
            # spin_betas = torch.from_numpy(data['opt_beta'])

            #Compute 2D reprojection error
            # if not (data['loss_keypoints_2d']<0.0001 or data['loss_keypoints_2d']>0.001 :
            #     continue
            maxBeta = abs(torch.max( abs(ours_betas)).item())
            if data['loss_keypoints_2d']>0.0005 or maxBeta>3:
                erroneousCnt +=1
            print(">>> loss2d: {}, maxBeta: {}".format( data['loss_keypoints_2d'],maxBeta) )

            # spin_pose = torch.from_numpy(data['opt_pose'])
            pred_camera_vis = data['pred_camera']

            if os.path.exists(imgFullPath) == False:
                print(imgFullPath)
                assert os.path.exists(imgFullPath)
            rawImg = cv2.imread(imgFullPath)
            print(imgFullPath)

            croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, center, scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

            #Visualize 2D image (interactive mode only)
            if args.bRenderToFiles ==False:
                viewer2D.ImShow(rawImg, name='rawImg', waitTime=10)      #You should press any key
                viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=10)

            # Regenerate mesh/joints from the saved SMPL parameters
            ours_output = smpl(betas=ours_betas, body_pose=ours_pose_rotmat[:,1:], global_orient=ours_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
            ours_vertices = ours_output.vertices.detach().cpu().numpy()
            ours_joints_3d = ours_output.joints.detach().cpu().numpy()

            #Visualize 3D mesh and 3D skeleton in BBox Space
            if True:
                b =0
                camParam_scale = pred_camera_vis[b,0]
                camParam_trans = pred_camera_vis[b,1:]

                ############### Visualize Mesh ###############
                pred_vert_vis = ours_vertices[b].copy()
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                glViewer.setMeshData([pred_meshes], bComputeNormal= True)

                ################ Visualize Skeletons ###############
                #Vis pred-SMPL joint
                pred_joints_vis = ours_joints_3d[b,:,:3].copy()     #(N,3)
                pred_joints_vis = convert_smpl_to_bbox(pred_joints_vis, camParam_scale, camParam_trans)
                glViewer.setSkeleton( [pred_joints_vis.ravel()[:,np.newaxis]])

                ################ Other 3D setup###############
                glViewer.setBackgroundTexture(croppedImg)
                glViewer.setWindowSize(croppedImg.shape[1]*args.windowscale, croppedImg.shape[0]*args.windowscale)
                glViewer.SetOrthoCamera(True)
                print("Press 'q' in the 3D window to go to the next sample")
                glViewer.show(0)

            #Visualize 3D mesh and 3D skeleton on original image space
            if True:
                b =0
                camParam_scale = pred_camera_vis[b,0]
                camParam_trans = pred_camera_vis[b,1:]

                ############### Visualize Mesh ###############
                pred_vert_vis = ours_vertices[b].copy()
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                glViewer.setMeshData([pred_meshes], bComputeNormal= True)

                # ################ Visualize Skeletons ###############
                #Vis pred-SMPL joint
                pred_joints_vis = ours_joints_3d[b,:,:3].copy()     #(N,3)
                pred_joints_vis = convert_smpl_to_bbox(pred_joints_vis, camParam_scale, camParam_trans)
                pred_joints_vis = convert_bbox_to_oriIm(pred_joints_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                glViewer.setSkeleton( [pred_joints_vis.ravel()[:,np.newaxis]])

                glViewer.setBackgroundTexture(rawImg)
                glViewer.setWindowSize(rawImg.shape[1]*args.magnifyFactor, rawImg.shape[0]*args.magnifyFactor)
                glViewer.SetOrthoCamera(True)

                print("Press 'q' in the 3D window to go to the next sample")
                if args.bRenderToFiles:        #Export rendered files
                    if os.path.exists(render_dirName) == False:        #make a output folder if necessary
                        os.mkdir(render_dirName)
                    # subjId = data['subjectId'][22:24]
                    fileName = "{}_{}".format(data['subjectId'], os.path.basename(imgFullPathOri)[:-4])
                    # rawImg = cv2.putText(rawImg,data['subjectId'],(100,100), cv2.FONT_HERSHEY_PLAIN, 2, (255,255,0),2)
                    glViewer.render_on_image(render_dirName, fileName, rawImg)
                    print(f"Render to {fileName}")
def visEFT_multiSubjects(inputDir, imDir, smplModelDir, bUseSMPLX = False):
    """Visualize EFT fitting results with multiple subjects per image (glViewer).

    Groups the pkl files in inputDir by image name (file-name prefix before
    the last '_', skipping '_init' files), regenerates one SMPL/SMPL-X mesh
    and skeleton per subject in original-image space, and shows/renders all
    of them together via glViewer.

    Args:
        inputDir: folder of EFT fitting pkl files.
        imDir: root folder of the raw images.
        smplModelDir: SMPL/SMPL-X model folder.
        bUseSMPLX: True to load SMPLX, False for SMPL.
    """
    if bUseSMPLX:
        smpl = SMPLX(smplModelDir, batch_size=1, create_transl=False)
    else:
        smpl = SMPL(smplModelDir, batch_size=1, create_transl=False)

    fileList = listdir(inputDir)        #Check all fitting files
    print(">> Found {} files in the fitting folder {}".format(len(fileList), inputDir))
    totalCnt =0         # NOTE(review): incremented nowhere in this function
    erroneousCnt =0

    #Merge sample from the same image
    data_perimage ={}
    for f in sorted(fileList):
        if "_init" in f:
            continue
        #Load
        imageName = f[:f.rfind('_')]        # file-name convention: <imageName>_<suffix>.pkl
        if imageName not in data_perimage.keys():
            data_perimage[imageName] =[]
        data_perimage[imageName].append(f)

    for imgName in data_perimage:
        eftFileNames = data_perimage[imgName]

        meshData =[]        # accumulated meshes, one per subject
        skelData =[]        # accumulated skeletons, one per subject
        for f in eftFileNames:
            fileFullPath = join(inputDir, f)
            with open(fileFullPath,'rb') as f:
                data = pickle.load(f)

            imgFullPathOri = data['imageName'][0]
            imgFullPath = os.path.join(imDir, os.path.basename(imgFullPathOri))
            if True:        #Additional path checking, if not valid: retry with 1-3 path levels preserved
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri ,1)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri,2)
                if os.path.exists(imgFullPath) == False:
                    imgFullPath =getpath_level(imDir, imgFullPathOri, 3 )

            scale = data['scale'][0]
            center = data['center'][0]

            ours_betas = torch.from_numpy(data['pred_shape'])
            ours_pose_rotmat = torch.from_numpy(data['pred_pose_rotmat'])
            # spin_betas = torch.from_numpy(data['opt_beta'])

            #Compute 2D reprojection error
            # if not (data['loss_keypoints_2d']<0.0001 or data['loss_keypoints_2d']>0.001 :
            #     continue
            maxBeta = abs(torch.max( abs(ours_betas)).item())
            if data['loss_keypoints_2d']>0.0005 or maxBeta>3:
                erroneousCnt +=1
            print(">>> loss2d: {}, maxBeta: {}".format( data['loss_keypoints_2d'],maxBeta) )

            # spin_pose = torch.from_numpy(data['opt_pose'])
            pred_camera_vis = data['pred_camera']

            assert os.path.exists(imgFullPath)
            rawImg = cv2.imread(imgFullPath)
            print(imgFullPath)

            croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, center, scale, (constants.IMG_RES, constants.IMG_RES) )

            #Visualize 2D image (interactive mode only)
            if args.bRenderToFiles ==False:
                viewer2D.ImShow(rawImg, name='rawImg', waitTime=10)      #You should press any key
                viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=10)

            # Regenerate mesh/joints; SMPL-X stores two fewer body-pose joints
            if bUseSMPLX:
                ours_output = smpl(betas=ours_betas, body_pose=ours_pose_rotmat[:,1:-2], global_orient=ours_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
                # ours_output = smpl()        #Default test
            else:
                ours_output = smpl(betas=ours_betas, body_pose=ours_pose_rotmat[:,1:], global_orient=ours_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
                # ours_output = smpl()        #Default test
            ours_vertices = ours_output.vertices.detach().cpu().numpy()
            ours_joints_3d = ours_output.joints.detach().cpu().numpy()

            if False:       #Debugging
                # ours_vertices = ours_vertices - ours_joints_3d[0,12,:]
                save_mesh_obj(ours_vertices[0], smpl.faces, 'test.obj')

            #Visualize 3D mesh and 3D skeleton on original image space
            if True:
                b =0
                camParam_scale = pred_camera_vis[b,0]
                camParam_trans = pred_camera_vis[b,1:]

                ############### Visualize Mesh ###############
                pred_vert_vis = ours_vertices[b].copy()
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                # glViewer.setMeshData([pred_meshes], bComputeNormal= True)

                # ################ Visualize Skeletons ###############
                #Vis pred-SMPL joint
                # pred_joints_vis = ours_joints_3d[b,-9:,:3].copy()     #(N,3)     #Debuggin
                pred_joints_vis = ours_joints_3d[b,:,:3].copy()     #(N,3)
                pred_joints_vis = convert_smpl_to_bbox(pred_joints_vis, camParam_scale, camParam_trans)
                pred_joints_vis = convert_bbox_to_oriIm(pred_joints_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])

                meshData.append(pred_meshes)
                skelData.append(pred_joints_vis.ravel()[:,np.newaxis])
                # glViewer.setSkeleton( [pred_joints_vis.ravel()[:,np.newaxis]])

                glViewer.setBackgroundTexture(rawImg)
                glViewer.setWindowSize(rawImg.shape[1]*args.magnifyFactor, rawImg.shape[0]*args.magnifyFactor)
                glViewer.SetOrthoCamera(True)

                # print("Press 'q' in the 3D window to go to the next sample")
                # glViewer.show(0)

        # Show all subjects of this image at once
        glViewer.setSkeleton(skelData)
        glViewer.setMeshData(meshData, bComputeNormal= True)

        if args.bRenderToFiles:        #Export rendered files
            if os.path.exists(render_dirName) == False:        #make a output folder if necessary
                os.mkdir(render_dirName)
            fileName = imgFullPathOri[:-4].replace("/","_")
            glViewer.render_on_image(render_dirName, fileName, rawImg)
            print(f"render to {fileName}")
        glViewer.show(args.displaytime)
def visEFT_singleSubject(renderer):
    """Visualize EFT fitting results stored in the JSON EFT format (one subject per entry).

    Loads args.fit_data (JSON with 'ver' and 'data' keys), rebuilds the SMPL
    mesh from each entry's parameters, renders it over the original image
    together with 2D keypoints, two extra side views, and optionally a
    turn-table sweep. Outputs are written into args.render_dir.

    Args:
        renderer: mesh renderer providing set_mesh/display/get_screen_color_ibgr etc.
    """
    MAGNIFY_RATIO = 3       #onbbox only. To magnify the rendered image size
    bStopForEachSample = args.waitforkeys        #if True, it will wait for any key pressed to move to the next sample
    bShowTurnTable = args.turntable

    inputData = args.fit_data
    imgDir = args.img_dir

    #Load SMPL model
    smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
    smpl = SMPL_19(smplModelPath, batch_size=1, create_transl=False)

    #Load EFT fitting data
    print(f"Loading EFT data from {inputData}")
    if os.path.exists(inputData):
        with open(inputData,'r') as f:
            eft_data = json.load(f)
            print("EFT data: ver {}".format(eft_data['ver']))
            eft_data_all = eft_data['data']
    else:
        print(f"ERROR:: Cannot find EFT data: {inputData}")
        assert False

    #Visualize each EFT Fitting output
    for idx, eft_data in enumerate(tqdm(eft_data_all)):

        #Get raw image path
        imgFullPath = eft_data['imageName']
        # imgName = os.path.basename(imgFullPath)
        imgName = imgFullPath
        imgFullPath =os.path.join(imgDir, imgName)
        if os.path.exists(imgFullPath) ==False:
            print(f"Img path is not valid: {imgFullPath}")
            assert False
        rawImg = cv2.imread(imgFullPath)
        print(f'Input image: {imgFullPath}')

        #EFT data
        bbox_scale = eft_data['bbox_scale']
        bbox_center = eft_data['bbox_center']

        pred_camera = np.array(eft_data['parm_cam'])
        pred_betas = np.reshape(np.array( eft_data['parm_shape'], dtype=np.float32), (1,10) )     #(10,)
        pred_betas = torch.from_numpy(pred_betas)

        pred_pose_rotmat = np.reshape( np.array( eft_data['parm_pose'], dtype=np.float32), (1,24,3,3)  )        #(24,3,3)
        pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)

        keypoint_2d_validity = eft_data['joint_validity_openpose18']

        #COCO only. Annotation index
        if 'annotId' in eft_data.keys():
            print("COCO annotId: {}".format(eft_data['annotId']))

        #Get SMPL mesh and joints from SMPL parameters
        smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,[0]], pose2rot=False)
        smpl_vertices = smpl_output.vertices.detach().cpu().numpy()[0]
        smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()[0]

        #Crop image using cropping information
        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

        if MAGNIFY_RATIO>1:
            croppedImg = cv2.resize(croppedImg, (croppedImg.shape[1]*MAGNIFY_RATIO, croppedImg.shape[0]*MAGNIFY_RATIO) )

        ########################
        # Visualization
        ########################

        # Visualize 2D image
        if True:
            viewer2D.ImShow(rawImg, name='rawImg', waitTime=1)      #You should press any key
            viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

            #Convert bbox_center, bbox_scale --> bbox_xyxy
            bbox_xyxy = conv_bboxinfo_bboxXYXY(bbox_scale,bbox_center)
            img_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg.copy(),bbox_xyxy[:2], bbox_xyxy[2:])
            viewer2D.ImShow(img_bbox, name='img_bbox', waitTime=1)

        # Visualization Mesh
        if True:
            camParam_scale = pred_camera[0]
            camParam_trans = pred_camera[1:]
            pred_vert_vis = smpl_vertices
            smpl_joints_3d_vis = smpl_joints_3d

            if args.onbbox:
                # Keep everything in bbox (cropped) space
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)
                renderer.setBackgroundTexture(croppedImg)
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])

                pred_vert_vis *=MAGNIFY_RATIO
            else:
                #Covert SMPL to BBox first
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                renderer.setBackgroundTexture(rawImg)
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

                #In orthographic model. XY of 3D is just 2D projection
                smpl_joints_2d_vis = conv_3djoint_2djoint(smpl_joints_3d_vis,rawImg.shape )
                # image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_smpl45(smpl_joints_2d_vis, image=rawImg.copy(),color=(0,255,255))
                image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(smpl_joints_2d_vis, image=rawImg.copy(),color=(255,0,0))        #All 2D joint
                image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(smpl_joints_2d_vis, pt2d_visibility=keypoint_2d_validity, image=image_2dkeypoint_pred,color=(0,255,255))        #Only valid
                viewer2D.ImShow(image_2dkeypoint_pred, name='keypoint_2d_pred', waitTime=1)

            pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
            v = pred_meshes['ver']
            f = pred_meshes['f']

            #Visualize in the original image space
            renderer.set_mesh(v,f)
            renderer.showBackground(True)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("cam")

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

            renderer.display()
            renderImg = renderer.get_screen_color_ibgr()
            viewer2D.ImShow(renderImg,waitTime=1)

        # Visualize multi-level cropped bbox
        if args.multi_bbox:
            from demo.multi_bbox_gen import multilvel_bbox_crop_gen
            bbox_list = multilvel_bbox_crop_gen(rawImg, pred_vert_vis, bbox_center, bbox_scale)

            #Visualize BBox (first box green, others yellow)
            for b_idx, b in enumerate(bbox_list):
                # bbox_xyxy= conv_bboxinfo_centerscale_to_bboxXYXY(b['center'], b['scale'])
                bbox_xyxy= b['bbox_xyxy']
                if b_idx==0:
                    img_multi_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg, bbox_xyxy[:2], bbox_xyxy[2:] ,color=(0,255,0))
                else:
                    img_multi_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg, bbox_xyxy[:2], bbox_xyxy[2:] ,color=(0,255,255))
            viewer2D.ImShow(img_multi_bbox, name='multi_bbox', waitTime=1)
            # for bbox in bbox_list:

        # Visualization Mesh on side view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            # renderer.setCameraViewMode("side")      #To show the object in side vie
            renderer.setCameraViewMode("free")
            renderer.setViewAngle(90,20)

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()        #Overwite on rawImg
            viewer2D.ImShow(sideImg,waitTime=1)

            sideImg = cv2.resize(sideImg, (renderImg.shape[1], renderImg.shape[0]) )
            # renderImg = cv2.resize(renderImg, (sideImg.shape[1], sideImg.shape[0]) )

        # Visualization Mesh on side view (second angle; rendered but not saved below)
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            # renderer.setCameraViewMode("side")      #To show the object in side vie
            renderer.setCameraViewMode("free")
            renderer.setViewAngle(-60,50)

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            sideImg_2 = renderer.get_screen_color_ibgr()        #Overwite on rawImg
            viewer2D.ImShow(sideImg_2,waitTime=1)

            sideImg_2 = cv2.resize(sideImg_2, (renderImg.shape[1], renderImg.shape[0]) )
            # renderImg = cv2.resize(renderImg, (sideImg.shape[1], sideImg.shape[0]) )

        #Visualize camera view and side view
        saveImg = np.concatenate( (renderImg,sideImg), axis =1)
        # saveImg = np.concatenate( (croppedImg, renderImg,sideImg, sideImg_2), axis =1)

        if bStopForEachSample:
            viewer2D.ImShow(saveImg,waitTime=0)      #waitTime=0 means that it will wait for any key pressed
        else:
            viewer2D.ImShow(saveImg,waitTime=1)

        #Render Mesh on the rotating view
        if bShowTurnTable:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("free")
            for i in range(90):
                renderer.setViewAngle(i*4,0)
                renderer.display()
                sideImg = renderer.get_screen_color_ibgr()        #Overwite on rawImg
                viewer2D.ImShow(sideImg,waitTime=1,name="turn_table")

                if False:       #If you want to save this into files
                    render_output_path = args.render_dir + '/turntable_{}_{:08d}.jpg'.format(os.path.basename(imgName),i)
                    cv2.imwrite(render_output_path, sideImg)

        #Save the rendered image to files
        if True:
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{}_eft{:08d}.jpg'.format(imgName[:-4],idx)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, saveImg)
def visEFT_multiSubjects(renderer):
    """Visualize EFT fitting results in the JSON EFT format, multiple subjects per image.

    Loads args.fit_data (JSON with 'ver' and 'data' keys), groups entries by
    image name, adds one SMPL mesh per subject to the renderer, and renders
    all subjects of an image together over the raw image (camera view, plus an
    optional turn-table sweep). An output per image is written to args.render_dir.

    Args:
        renderer: mesh renderer providing add_mesh/display/get_screen_color_ibgr etc.
    """
    bStopForEachSample = args.waitforkeys        #if True, it will wait for any key pressed to move to the next sample
    bShowTurnTable = args.turntable

    # inputDir = args.fit_dir
    inputData = args.fit_data
    imgDir = args.img_dir
    smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
    smpl = SMPL(smplModelPath, batch_size=1, create_transl=False)

    if os.path.exists(inputData):
        with open(inputData,'r') as f:
            eft_data = json.load(f)
            print("EFT data: ver {}".format(eft_data['ver']))
            eft_data_all = eft_data['data']
    else:
        print(f"ERROR:: Cannot find EFT data: {inputData}")
        assert False

    #Aggregate all efl per image
    eft_perimage ={}
    for idx, eft_data in enumerate(eft_data_all):
        #Load
        imageName = eft_data['imageName']
        if imageName not in eft_perimage.keys():
            eft_perimage[imageName] =[]
        eft_perimage[imageName].append(eft_data)

    for imgName in tqdm(eft_perimage):
        eft_data_perimage = eft_perimage[imgName]

        renderer.clear_mesh()       # one accumulated render pass per image

        for idx,eft_data in enumerate(eft_data_perimage):

            #Get raw image path
            imgFullPath = eft_data['imageName']
            # NOTE(review): reassigns the outer loop variable 'imgName'; the
            # save path below therefore uses this basename, not the dict key.
            imgName = os.path.basename(imgFullPath)
            imgFullPath =os.path.join(imgDir, imgName)
            if os.path.exists(imgFullPath) ==False:
                print(f"Img path is not valid: {imgFullPath}")
                assert False
            rawImg = cv2.imread(imgFullPath)
            print(f'Input image: {imgFullPath}')

            bbox_scale = eft_data['bbox_scale']
            bbox_center = eft_data['bbox_center']

            pred_camera = np.array(eft_data['parm_cam'])
            pred_betas = np.reshape(np.array( eft_data['parm_shape'], dtype=np.float32), (1,10) )     #(10,)
            pred_betas = torch.from_numpy(pred_betas)

            pred_pose_rotmat = np.reshape( np.array( eft_data['parm_pose'], dtype=np.float32), (1,24,3,3)  )        #(24,3,3)
            pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)

            # gt_keypoint_2d = np.reshape( np.array(eft_data['gt_keypoint_2d']), (-1,3))    #(49,3)
            keypoint_2d_validity = eft_data['joint_validity_openpose18']

            #COCO only. Annotation index
            print("COCO annotId: {}".format(eft_data['annotId']))

            #Obtain skeleton and smpl data
            smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )
            smpl_vertices = smpl_output.vertices.detach().cpu().numpy()
            smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()

            #Crop image
            croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg.copy(), bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )

            ########################
            # Visualize
            # Visualize 2D image
            if False:
                viewer2D.ImShow(rawImg, name='rawImg', waitTime=1)      #You should press any key
                viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

            # Visualization Mesh on raw images
            if True:
                camParam_scale = pred_camera[0]
                camParam_trans = pred_camera[1:]
                pred_vert_vis = smpl_vertices[0]
                smpl_joints_3d_vis = smpl_joints_3d[0]

                if False:#args.onbbox:      #Always in the original image
                    pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                    smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)
                    renderer.setBackgroundTexture(croppedImg)
                    renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
                else:
                    #Covert SMPL to BBox first
                    pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                    smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)

                    #From cropped space to original
                    pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                    smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                    renderer.setBackgroundTexture(rawImg)
                    renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

                pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
                v = pred_meshes['ver']
                f = pred_meshes['f']

                #Visualize in the original image space
                # renderer.set_mesh(v,f)
                renderer.add_mesh(v,f)      # accumulate: one mesh per subject

        #Render Mesh on the camera view
        renderer.showBackground(True)
        renderer.setWorldCenterBySceneCenter()
        renderer.setCameraViewMode("cam")
        renderer.display()
        overlaid = renderer.get_screen_color_ibgr()        #Overwite on rawImg
        # viewer2D.ImShow(overlaid,waitTime=1,name="overlaid")

        if bStopForEachSample:
            viewer2D.ImShow(overlaid,waitTime=0,name="overlaid")      #waitTime=0 means that it will wait for any key pressed
        else:
            viewer2D.ImShow(overlaid,waitTime=1,name="overlaid")

        #Render Mesh on the rotating view
        if bShowTurnTable:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("free")
            for i in range(90):
                renderer.setViewAngle(i*4,0)
                renderer.display()
                sideImg = renderer.get_screen_color_ibgr()        #Overwite on rawImg
                viewer2D.ImShow(sideImg,waitTime=1,name="turn_table")

        if True:    #Save the rendered image to files
            if os.path.exists(args.render_dir) == False:
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{}.jpg'.format(imgName)
            print(f"Save to {render_output_path}")
            # NOTE(review): this writes the raw input image, not the rendered
            # 'overlaid' result — confirm whether saving 'overlaid' was intended.
            cv2.imwrite(render_output_path, rawImg)