def _keypoint_camera_to_world(keypoints,
                              camera_params,
                              image_name=None,
                              dataset='Body3DH36MDataset'):
    """Project 3D keypoints from the camera space to the world space.

    Args:
        keypoints (np.ndarray): 3D keypoints in shape [..., 3]
        camera_params (dict): Parameters for all cameras.
        image_name (str): The image name to specify the camera.
        dataset (str): The dataset type, e.g. Body3DH36MDataset.
    """
    cam_key = None
    if dataset == 'Body3DH36MDataset':
        subj, rest = osp.basename(image_name).split('_', 1)
        _, rest = rest.split('.', 1)
        camera, rest = rest.split('_', 1)
        cam_key = (subj, camera)
    else:
        raise NotImplementedError

    camera = SimpleCamera(camera_params[cam_key])
    keypoints_world = keypoints.copy()
    keypoints_world[..., :3] = camera.camera_to_world(keypoints[..., :3])

    return keypoints_world
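
# A minimal sketch (added for illustration, not part of the original demo)
# of the Human3.6M image-name parsing in _keypoint_camera_to_world above,
# assuming the mmpose convention '<subject>_<action>.<camera>_<frame>.jpg'.
# The file name below is a hypothetical example.
def _example_h36m_cam_key():
    import os.path as osp
    name = 'S1_Directions_1.54138969_000001.jpg'  # hypothetical example
    subj, rest = osp.basename(name).split('_', 1)
    _, rest = rest.split('.', 1)
    camera, _ = rest.split('_', 1)
    # -> ('S1', '54138969'), the (subject, camera) key into camera_params
    return (subj, camera)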

def test_camera_projection():
    results = get_data_sample()
    pipeline_1 = [
        dict(
            type='CameraProjection',
            item='input_3d',
            output_name='input_3d_w',
            camera_type='SimpleCamera',
            mode='camera_to_world'),
        dict(
            type='CameraProjection',
            item='input_3d_w',
            output_name='input_3d_wp',
            camera_type='SimpleCamera',
            mode='world_to_pixel'),
        dict(
            type='CameraProjection',
            item='input_3d',
            output_name='input_3d_p',
            camera_type='SimpleCamera',
            mode='camera_to_pixel'),
        dict(type='Collect', keys=['input_3d_wp', 'input_3d_p'], meta_keys=[])
    ]

    camera_param = results['camera_param'].copy()
    camera_param['K'] = np.concatenate(
        (np.diagflat(camera_param['f']), camera_param['c']), axis=-1)
    pipeline_2 = [
        dict(
            type='CameraProjection',
            item='input_3d',
            output_name='input_3d_w',
            camera_type='SimpleCamera',
            camera_param=camera_param,
            mode='camera_to_world'),
        dict(
            type='CameraProjection',
            item='input_3d_w',
            output_name='input_3d_wp',
            camera_type='SimpleCamera',
            camera_param=camera_param,
            mode='world_to_pixel'),
        dict(
            type='CameraProjection',
            item='input_3d',
            output_name='input_3d_p',
            camera_type='SimpleCamera',
            camera_param=camera_param,
            mode='camera_to_pixel'),
        dict(
            type='CameraProjection',
            item='input_3d_w',
            output_name='input_3d_wc',
            camera_type='SimpleCamera',
            camera_param=camera_param,
            mode='world_to_camera'),
        dict(
            type='Collect',
            keys=['input_3d_wp', 'input_3d_p', 'input_2d'],
            meta_keys=[])
    ]

    output1 = Compose(pipeline_1)(results)
    output2 = Compose(pipeline_2)(results)

    np.testing.assert_allclose(
        output1['input_3d_wp'], output1['input_3d_p'], rtol=1e-6)
    np.testing.assert_allclose(
        output2['input_3d_wp'], output2['input_3d_p'], rtol=1e-6)
    np.testing.assert_allclose(
        output2['input_3d_p'], output2['input_2d'], rtol=1e-3, atol=1e-1)

    # test invalid camera parameters
    with pytest.raises(ValueError):
        # missing intrinsic parameters
        camera_param_wo_intrinsic = camera_param.copy()
        camera_param_wo_intrinsic.pop('K')
        camera_param_wo_intrinsic.pop('f')
        camera_param_wo_intrinsic.pop('c')
        _ = Compose([
            dict(
                type='CameraProjection',
                item='input_3d',
                camera_type='SimpleCamera',
                camera_param=camera_param_wo_intrinsic,
                mode='camera_to_pixel')
        ])

    with pytest.raises(ValueError):
        # invalid mode
        _ = Compose([
            dict(
                type='CameraProjection',
                item='input_3d',
                camera_type='SimpleCamera',
                camera_param=camera_param,
                mode='dummy')
        ])

    # test camera without undistortion
    camera_param_wo_undistortion = camera_param.copy()
    camera_param_wo_undistortion.pop('k')
    camera_param_wo_undistortion.pop('p')
    _ = Compose([
        dict(
            type='CameraProjection',
            item='input_3d',
            camera_type='SimpleCamera',
            camera_param=camera_param_wo_undistortion,
            mode='camera_to_pixel')
    ])

    # test pixel to camera transformation
    camera = SimpleCamera(camera_param_wo_undistortion)
    kpt_camera = np.random.rand(14, 3)
    kpt_pixel = camera.camera_to_pixel(kpt_camera)
    _kpt_camera = camera.pixel_to_camera(
        np.concatenate([kpt_pixel, kpt_camera[:, [2]]], -1))
    assert_array_almost_equal(_kpt_camera, kpt_camera, decimal=4)
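
# A minimal sketch (added for illustration, not part of the original tests)
# of the 2x3 intrinsic matrix assembled in test_camera_projection above,
# assuming 'f' and 'c' are stored as (2, 1) column vectors (focal lengths
# and principal point) as SimpleCamera expects:
#
#     K = [[fx, 0, cx],
#          [0, fy, cy]]
#
# The focal lengths and principal point below are hypothetical values.
def _example_build_intrinsic():
    import numpy as np
    f = np.array([[1145.0], [1145.0]])  # hypothetical focal lengths (px)
    c = np.array([[512.5], [515.5]])  # hypothetical principal point (px)
    K = np.concatenate((np.diagflat(f), c), axis=-1)
    assert K.shape == (2, 3)
    return K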

def main():
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose network')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument(
        '--json-file',
        type=str,
        default='',
        help='Json file containing image info.')
    parser.add_argument(
        '--camera-param-file',
        type=str,
        default=None,
        help='Camera parameter file for converting 3D pose predictions from '
        'the pixel space to the camera space. If None, keypoints in pixel '
        'space will be visualized.')
    parser.add_argument(
        '--gt-joints-file',
        type=str,
        default=None,
        help='Optional argument. Ground truth 3D keypoint parameter file. '
        'If None, gt keypoints will not be shown and keypoints in pixel '
        'space will be visualized.')
    parser.add_argument(
        '--rebase-keypoint-height',
        action='store_true',
        help='Rebase the predicted 3D pose so its lowest keypoint has a '
        'height of 0 (landing on the ground). This is useful for '
        'visualization when the model does not predict the global position '
        'of the 3D pose.')
    parser.add_argument(
        '--show-ground-truth',
        action='store_true',
        help='If True, show ground truth keypoints if they are available.')
    parser.add_argument(
        '--show',
        action='store_true',
        default=False,
        help='whether to show img')
    parser.add_argument(
        '--out-img-root',
        type=str,
        default=None,
        help='Root of the output visualization images. '
        'Default not saving the visualization images.')
    parser.add_argument(
        '--device', default='cuda:0', help='Device for inference')
    parser.add_argument(
        '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
    parser.add_argument(
        '--radius',
        type=int,
        default=4,
        help='Keypoint radius for visualization')
    parser.add_argument(
        '--thickness',
        type=int,
        default=1,
        help='Link thickness for visualization')

    args = parser.parse_args()
    assert args.show or (args.out_img_root != '')

    coco = COCO(args.json_file)

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        args.pose_config, args.pose_checkpoint, device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']

    # load camera parameters
    camera_params = None
    if args.camera_param_file is not None:
        camera_params = mmcv.load(args.camera_param_file)
    # load ground truth joints parameters
    gt_joint_params = None
    if args.gt_joints_file is not None:
        gt_joint_params = mmcv.load(args.gt_joints_file)

    # load hand bounding boxes
    det_results_list = []
    for image_id, image in coco.imgs.items():
        image_name = osp.join(args.img_root, image['file_name'])

        ann_ids = coco.getAnnIds(image_id)
        det_results = []

        capture_key = str(image['capture'])
        camera_key = image['camera']
        frame_idx = image['frame_idx']

        for ann_id in ann_ids:
            ann = coco.anns[ann_id]
            if camera_params is not None:
                camera_param = {
                    key: camera_params[capture_key][key][camera_key]
                    for key in camera_params[capture_key].keys()
                }
                camera_param = _transform_interhand_camera_param(camera_param)
            else:
                camera_param = None
            if gt_joint_params is not None:
                joint_param = gt_joint_params[capture_key][str(frame_idx)]
                gt_joint = np.concatenate([
                    np.array(joint_param['world_coord']),
                    np.array(joint_param['joint_valid'])
                ], axis=-1)
            else:
                gt_joint = None

            det_result = {
                'image_name': image_name,
                'bbox': ann['bbox'],  # bbox format is 'xywh'
                'camera_param': camera_param,
                'keypoints_3d_gt': gt_joint
            }
            det_results.append(det_result)
        det_results_list.append(det_results)

    for i, det_results in enumerate(
            mmcv.track_iter_progress(det_results_list)):

        image_name = det_results[0]['image_name']
        pose_results = inference_interhand_3d_model(
            pose_model, image_name, det_results, dataset=dataset)

        # Post processing
        pose_results_vis = []
        for idx, res in enumerate(pose_results):
            keypoints_3d = res['keypoints_3d']
            # normalize kpt score
            if keypoints_3d[:, 3].max() > 1:
                keypoints_3d[:, 3] /= 255
            # get 2D keypoints in pixel space
            res['keypoints'] = keypoints_3d[:, [0, 1, 3]]

            # For model-predicted keypoints, channel 0 and 1 are coordinates
            # in pixel space, and channel 2 is the depth (in mm) relative
            # to root joints.
            # If both camera parameters and the absolute depth of the root
            # joints are provided, we can transform the keypoints to camera
            # space for better visualization.
            camera_param = res['camera_param']
            keypoints_3d_gt = res['keypoints_3d_gt']
            if camera_param is not None and keypoints_3d_gt is not None:
                # build camera model
                camera = SimpleCamera(camera_param)
                # transform gt joints from world space to camera space
                keypoints_3d_gt[:, :3] = camera.world_to_camera(
                    keypoints_3d_gt[:, :3])
                # transform relative depth to absolute depth
                keypoints_3d[:21, 2] += keypoints_3d_gt[20, 2]
                keypoints_3d[21:, 2] += keypoints_3d_gt[41, 2]
                # transform keypoints from pixel space to camera space
                keypoints_3d[:, :3] = camera.pixel_to_camera(
                    keypoints_3d[:, :3])

            # rotate the keypoints to make the z-axis correspond to height
            # for better visualization
            vis_R = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
            keypoints_3d[:, :3] = keypoints_3d[:, :3] @ vis_R
            if keypoints_3d_gt is not None:
                keypoints_3d_gt[:, :3] = keypoints_3d_gt[:, :3] @ vis_R

            # rebase height (z-axis)
            if args.rebase_keypoint_height:
                valid = keypoints_3d[..., 3] > 0
                keypoints_3d[..., 2] -= np.min(
                    keypoints_3d[valid, 2], axis=-1, keepdims=True)

            res['keypoints_3d'] = keypoints_3d
            res['keypoints_3d_gt'] = keypoints_3d_gt

            # Add title
            instance_id = res.get('track_id', idx)
            res['title'] = f'Prediction ({instance_id})'
            pose_results_vis.append(res)
            # Add ground truth
            if args.show_ground_truth:
                if keypoints_3d_gt is None:
                    print('Failed to show ground truth. Please make sure '
                          'that gt-joints-file is provided.')
                else:
                    gt = res.copy()
                    if args.rebase_keypoint_height:
                        valid = keypoints_3d_gt[..., 3] > 0
                        keypoints_3d_gt[..., 2] -= np.min(
                            keypoints_3d_gt[valid, 2], axis=-1, keepdims=True)
                    gt['keypoints_3d'] = keypoints_3d_gt
                    gt['title'] = f'Ground truth ({instance_id})'
                    pose_results_vis.append(gt)

        # Visualization
        if args.out_img_root is None:
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = osp.join(args.out_img_root, f'vis_{i}.jpg')

        vis_3d_pose_result(
            pose_model,
            result=pose_results_vis,
            img=det_results[0]['image_name'],
            out_file=out_file,
            dataset=dataset,
            show=args.show,
            kpt_score_thr=args.kpt_thr,
            radius=args.radius,
            thickness=args.thickness,
        )
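
# A minimal sketch (added for illustration, not part of the original demo)
# of the visualization rotation used in main() above: with row-vector
# keypoints, p @ vis_R maps camera coordinates (x right, y down, z forward)
# to (x, z, -y), so the image 'up' direction (-y) becomes the +z height axis.
def _example_vis_rotation():
    import numpy as np
    vis_R = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
    p = np.array([[1.0, 2.0, 3.0]])  # hypothetical camera-space point
    assert np.allclose(p @ vis_R, [[1.0, 3.0, -2.0]])  # (x, z, -y)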