Example #1
def main():
    args = parse_args()

    if args.root_work_dir is None:
        # get the current time stamp
        now = datetime.now()
        ts = now.strftime('%Y_%m_%d_%H_%M')
        args.root_work_dir = f'work_dirs/flops_test_{ts}'
    mmcv.mkdir_or_exist(osp.abspath(args.root_work_dir))

    cfg = mmcv.load(args.config)

    results = []
    for i in range(args.priority + 1):
        models = cfg['model_list'][f'P{i}']
        for cur_model in models:
            cfg_file = cur_model['config']
            model_cfg = Config.fromfile(cfg_file)
            if 'input_shape' in cur_model.keys():
                input_shape = cur_model['input_shape']
                input_shape = tuple(map(int, input_shape.split(',')))
            else:
                image_size = model_cfg.data_cfg.image_size
                if isinstance(image_size, list):
                    input_shape = (3, ) + tuple(image_size)
                else:
                    input_shape = (3, image_size, image_size)

            model = init_pose_model(cfg_file)

            if hasattr(model, 'forward_dummy'):
                model.forward = model.forward_dummy
            else:
                raise NotImplementedError(
                    'FLOPs counter is currently not supported '
                    'with {}'.format(model.__class__.__name__))

            flops, params = get_model_complexity_info(
                model, input_shape, print_per_layer_stat=False)
            split_line = '=' * 30
            result = f'{split_line}\nModel config: {cfg_file}\n' \
                     f'Input shape: {input_shape}\n' \
                     f'Flops: {flops}\nParams: {params}\n{split_line}\n'

            print(result)
            results.append(result)

    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
    with open(osp.join(args.root_work_dir, 'flops.txt'), 'w') as f:
        for res in results:
            f.write(res)
Example #2
def main():

    args = parse_args()

    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3, ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    model = init_pose_model(args.config)

    if args.input_constructor == 'batch':
        input_constructor = partial(batch_constructor, model, args.batch_size)
    else:
        input_constructor = None

    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.format(
                model.__class__.__name__))

    flops, params = get_model_complexity_info(
        model,
        input_shape,
        input_constructor=input_constructor,
        print_per_layer_stat=(not args.not_print_per_layer_stat))
    split_line = '=' * 30
    input_shape = (args.batch_size, ) + input_shape
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
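batch_constructor is bound with functools.partial above but not defined in this snippet. A minimal sketch of what such a helper could look like, assuming get_model_complexity_info calls it with the input shape and feeds the returned dict to the model as keyword arguments (the helper in the original script may differ):

import torch


def batch_constructor(model, batch_size, input_shape):
    """Build a batched dummy input for complexity analysis (sketch only)."""
    img = torch.ones(()).new_empty(
        (batch_size, *input_shape),
        dtype=next(model.parameters()).dtype,
        device=next(model.parameters()).device)
    # forward_dummy is assumed to accept the image batch as its 'img' argument
    return {'img': img}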
Example #3
def main():
    parser = ArgumentParser()
    parser.add_argument('pose_lifter_config',
                        help='Config file for the 2nd stage pose lifter model')
    parser.add_argument(
        'pose_lifter_checkpoint',
        help='Checkpoint file for the 2nd stage pose lifter model')
    parser.add_argument('--pose-detector-config',
                        type=str,
                        default=None,
                        help='Config file for the 1st stage 2D pose detector')
    parser.add_argument(
        '--pose-detector-checkpoint',
        type=str,
        default=None,
        help='Checkpoint file for the 1st stage 2D pose detector')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument(
        '--json-file',
        type=str,
        default=None,
        help='Json file containing image and bbox information. Optionally, '
        'the Json file can also contain 2D pose information. See '
        '"only-second-stage".')
    parser.add_argument(
        '--camera-param-file',
        type=str,
        default=None,
        help='Camera parameter file for converting 3D pose predictions from '
        'the camera space to the world space. If None, no conversion will be '
        'applied.')
    parser.add_argument(
        '--only-second-stage',
        action='store_true',
        help='If true, load 2D pose detection result from the Json file and '
        'skip the 1st stage. The pose detection model will be ignored.')
    parser.add_argument(
        '--rebase-keypoint-height',
        action='store_true',
        help='Rebase the predicted 3D pose so its lowest keypoint has a '
        'height of 0 (landing on the ground). This is useful for '
        'visualization when the model does not predict the global position '
        'of the 3D pose.')
    parser.add_argument(
        '--show-ground-truth',
        action='store_true',
        help='If True, show ground truth if it is available. The ground truth '
        'should be contained in the annotations in the Json file with the key '
        '"keypoints_3d" for each instance.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show the image')
    parser.add_argument('--out-img-root',
                        type=str,
                        default=None,
                        help='Root of the output visualization images. '
                        'By default, the visualization images are not saved.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device for inference')
    parser.add_argument('--kpt-thr', type=float, default=0.3)
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    args = parser.parse_args()
    assert args.show or (args.out_img_root is not None)

    coco = COCO(args.json_file)

    # First stage: 2D pose detection
    pose_det_results_list = []
    if args.only_second_stage:
        from mmpose.apis.inference import _xywh2xyxy

        print('Stage 1: load 2D pose results from Json file.')
        for image_id, image in coco.imgs.items():
            image_name = osp.join(args.img_root, image['file_name'])
            ann_ids = coco.getAnnIds(image_id)
            pose_det_results = []
            for ann_id in ann_ids:
                ann = coco.anns[ann_id]
                keypoints = np.array(ann['keypoints']).reshape(-1, 3)
                keypoints[..., 2] = keypoints[..., 2] >= 1
                keypoints_3d = np.array(ann['keypoints_3d']).reshape(-1, 4)
                keypoints_3d[..., 3] = keypoints_3d[..., 3] >= 1
                bbox = np.array(ann['bbox']).reshape(1, -1)

                pose_det_result = {
                    'image_name': image_name,
                    'bbox': _xywh2xyxy(bbox),
                    'keypoints': keypoints,
                    'keypoints_3d': keypoints_3d
                }
                pose_det_results.append(pose_det_result)
            pose_det_results_list.append(pose_det_results)

    else:
        print('Stage 1: 2D pose detection.')

        pose_det_model = init_pose_model(args.pose_detector_config,
                                         args.pose_detector_checkpoint,
                                         device=args.device.lower())

        assert pose_det_model.cfg.model.type == 'TopDown', \
            'Only "TopDown" model is supported for the 1st stage ' \
            '(2D pose detection)'

        dataset = pose_det_model.cfg.data['test']['type']
        img_keys = list(coco.imgs.keys())

        for i in mmcv.track_iter_progress(range(len(img_keys))):
            # get bounding box annotations
            image_id = img_keys[i]
            image = coco.loadImgs(image_id)[0]
            image_name = osp.join(args.img_root, image['file_name'])
            ann_ids = coco.getAnnIds(image_id)

            # make person results for single image
            person_results = []
            for ann_id in ann_ids:
                person = {}
                ann = coco.anns[ann_id]
                person['bbox'] = ann['bbox']
                person_results.append(person)

            pose_det_results, _ = inference_top_down_pose_model(
                pose_det_model,
                image_name,
                person_results,
                bbox_thr=None,
                format='xywh',
                dataset=dataset,
                return_heatmap=False,
                outputs=None)

            for res in pose_det_results:
                res['image_name'] = image_name
            pose_det_results_list.append(pose_det_results)

    # Second stage: Pose lifting
    print('Stage 2: 2D-to-3D pose lifting.')

    pose_lift_model = init_pose_model(args.pose_lifter_config,
                                      args.pose_lifter_checkpoint,
                                      device=args.device.lower())

    assert pose_lift_model.cfg.model.type == 'PoseLifter', 'Only ' \
        '"PoseLifter" model is supported for the 2nd stage ' \
        '(2D-to-3D lifting)'
    dataset = pose_lift_model.cfg.data['test']['type']

    camera_params = None
    if args.camera_param_file is not None:
        camera_params = mmcv.load(args.camera_param_file)

    for i, pose_det_results in enumerate(
            mmcv.track_iter_progress(pose_det_results_list)):
        # 2D-to-3D pose lifting
        # Note that the pose_det_results are regarded as a single-frame pose
        # sequence
        pose_lift_results = inference_pose_lifter_model(
            pose_lift_model,
            pose_results_2d=[pose_det_results],
            dataset=dataset,
            with_track_id=False)

        image_name = pose_det_results[0]['image_name']

        # Pose processing
        pose_lift_results_vis = []
        for idx, res in enumerate(pose_lift_results):
            keypoints_3d = res['keypoints_3d']
            # project to world space
            if camera_params is not None:
                keypoints_3d = _keypoint_camera_to_world(
                    keypoints_3d,
                    camera_params=camera_params,
                    image_name=image_name,
                    dataset=dataset)
            # rebase height (z-axis)
            if args.rebase_keypoint_height:
                keypoints_3d[..., 2] -= np.min(keypoints_3d[..., 2],
                                               axis=-1,
                                               keepdims=True)
            res['keypoints_3d'] = keypoints_3d
            # Add title
            det_res = pose_det_results[idx]
            instance_id = det_res.get('track_id', idx)
            res['title'] = f'Prediction ({instance_id})'
            pose_lift_results_vis.append(res)
            # Add ground truth
            if args.show_ground_truth:
                if 'keypoints_3d' not in det_res:
                    print('Failed to show ground truth. Please make sure that'
                          ' the instance annotations from the Json file'
                          ' contain "keypoints_3d".')
                else:
                    gt = res.copy()
                    gt['keypoints_3d'] = det_res['keypoints_3d']
                    gt['title'] = f'Ground truth ({instance_id})'
                    pose_lift_results_vis.append(gt)

        # Visualization
        if args.out_img_root is None:
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = osp.join(args.out_img_root, f'vis_{i}.jpg')

        vis_3d_pose_result(pose_lift_model,
                           result=pose_lift_results_vis,
                           img=pose_lift_results[0]['image_name'],
                           out_file=out_file)
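The --rebase-keypoint-height step above only shifts a pose along the z-axis so that its lowest keypoint ends up at zero. A tiny numeric check with made-up coordinates:

import numpy as np

keypoints_3d = np.array([[0.0, 0.0, 1.2],
                         [0.0, 0.0, 0.4],
                         [0.0, 0.0, 0.9]])
keypoints_3d[..., 2] -= np.min(keypoints_3d[..., 2], axis=-1, keepdims=True)
print(keypoints_3d[..., 2])  # [0.8 0.  0.5]; the lowest joint now sits at z == 0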
Example #4
def main():
    args = parse_args()

    if 'cuda' in args.device.lower():
        if torch.cuda.is_available():
            with_cuda = True
        else:
            raise RuntimeError('No CUDA device found, please check it again.')
    else:
        with_cuda = False

    if args.root_work_dir is None:
        # get the current time stamp
        now = datetime.now()
        ts = now.strftime('%Y_%m_%d_%H_%M')
        args.root_work_dir = f'work_dirs/inference_speed_test_{ts}'
    mmcv.mkdir_or_exist(osp.abspath(args.root_work_dir))

    cfg = mmcv.load(args.config)
    dummy_datasets = mmcv.load(args.dummy_dataset_config)['dummy_datasets']

    results = []
    for i in range(args.priority + 1):
        models = cfg['model_list'][f'P{i}']
        for cur_model in models:
            cfg_file = cur_model['config']
            model_cfg = Config.fromfile(cfg_file)
            test_dataset = model_cfg['data']['test']
            dummy_dataset = dummy_datasets[test_dataset['type']]
            test_dataset.update(dummy_dataset)

            dataset = build_dataset(test_dataset)
            data_loader = build_dataloader(
                dataset,
                samples_per_gpu=args.batch_size,
                workers_per_gpu=model_cfg.data.workers_per_gpu,
                dist=False,
                shuffle=False)
            data_loader = IterLoader(data_loader)

            if 'pretrained' in model_cfg.model.keys():
                del model_cfg.model['pretrained']

            model = init_pose_model(model_cfg, device=args.device.lower())

            fp16_cfg = model_cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            if args.fuse_conv_bn:
                model = fuse_conv_bn(model)

            # benchmark with several iterations and take the average
            pure_inf_time = 0
            speed = []
            for iteration in range(args.num_iters + args.num_warmup):
                data = next(data_loader)
                data['img'] = data['img'].to(args.device.lower())
                data['img_metas'] = data['img_metas'].data[0]

                if with_cuda:
                    torch.cuda.synchronize()

                start_time = time.perf_counter()
                with torch.no_grad():
                    model(return_loss=False, **data)

                if with_cuda:
                    torch.cuda.synchronize()
                elapsed = time.perf_counter() - start_time

                if iteration >= args.num_warmup:
                    pure_inf_time += elapsed
                    speed.append(1 / elapsed)

            speed_mean = np.mean(speed)
            speed_std = np.std(speed)

            split_line = '=' * 30
            result = f'{split_line}\nModel config: {cfg_file}\n' \
                     f'Device: {args.device}\n' \
                     f'Batch size: {args.batch_size}\n' \
                     f'Overall average speed: {speed_mean:.2f} \u00B1 ' \
                     f'{speed_std:.2f} items / s\n' \
                     f'Total iters: {args.num_iters}\n'\
                     f'Total time: {pure_inf_time:.2f} s\n{split_line}\n'

            print(result)
            results.append(result)

    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are included and verify that the '
          'speed computation is correct.')
    with open(osp.join(args.root_work_dir, 'inference_speed.txt'), 'w') as f:
        for res in results:
            f.write(res)
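IterLoader lets the benchmark call next(data_loader) for num_iters + num_warmup iterations even when the dummy dataset holds fewer batches. Its definition is not shown above; a minimal sketch of such a wrapper (the helper bundled with the original script may differ):

class IterLoader:
    """Wrap a DataLoader so that next() can be called indefinitely (sketch)."""

    def __init__(self, dataloader):
        self._dataloader = dataloader
        self._iterator = iter(dataloader)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self._iterator)
        except StopIteration:
            # restart the underlying loader once it is exhausted
            self._iterator = iter(self._dataloader)
            return next(self._iterator)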
Example #5
def main():
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose network')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--json-file',
                        type=str,
                        default='',
                        help='Json file containing image info.')
    parser.add_argument(
        '--camera-param-file',
        type=str,
        default=None,
        help='Camera parameter file for converting 3D pose predictions from '
        'the pixel space to the camera space. If None, keypoints in pixel '
        'space will be visualized.')
    parser.add_argument(
        '--gt-joints-file',
        type=str,
        default=None,
        help='Optional argument. Ground truth 3D keypoint parameter file. '
        'If None, gt keypoints will not be shown and keypoints in pixel '
        'space will be visualized.')
    parser.add_argument(
        '--rebase-keypoint-height',
        action='store_true',
        help='Rebase the predicted 3D pose so its lowest keypoint has a '
        'height of 0 (landing on the ground). This is useful for '
        'visualization when the model does not predict the global position '
        'of the 3D pose.')
    parser.add_argument(
        '--show-ground-truth',
        action='store_true',
        help='If True, show ground truth keypoint if it is available.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show the image')
    parser.add_argument('--out-img-root',
                        type=str,
                        default=None,
                        help='Root of the output visualization images. '
                        'By default, the visualization images are not saved.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    args = parser.parse_args()
    assert args.show or (args.out_img_root is not None)

    coco = COCO(args.json_file)

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())
    dataset = pose_model.cfg.data['test']['type']

    # load camera parameters
    camera_params = None
    if args.camera_param_file is not None:
        camera_params = mmcv.load(args.camera_param_file)
    # load ground truth joints parameters
    gt_joint_params = None
    if args.gt_joints_file is not None:
        gt_joint_params = mmcv.load(args.gt_joints_file)

    # load hand bounding boxes
    det_results_list = []
    for image_id, image in coco.imgs.items():
        image_name = osp.join(args.img_root, image['file_name'])

        ann_ids = coco.getAnnIds(image_id)
        det_results = []

        capture_key = str(image['capture'])
        camera_key = image['camera']
        frame_idx = image['frame_idx']

        for ann_id in ann_ids:
            ann = coco.anns[ann_id]
            if camera_params is not None:
                camera_param = {
                    key: camera_params[capture_key][key][camera_key]
                    for key in camera_params[capture_key].keys()
                }
                camera_param = _transform_interhand_camera_param(camera_param)
            else:
                camera_param = None
            if gt_joint_params is not None:
                joint_param = gt_joint_params[capture_key][str(frame_idx)]
                gt_joint = np.concatenate([
                    np.array(joint_param['world_coord']),
                    np.array(joint_param['joint_valid'])
                ], axis=-1)
            else:
                gt_joint = None

            det_result = {
                'image_name': image_name,
                'bbox': ann['bbox'],  # bbox format is 'xywh'
                'camera_param': camera_param,
                'keypoints_3d_gt': gt_joint
            }
            det_results.append(det_result)
        det_results_list.append(det_results)

    for i, det_results in enumerate(
            mmcv.track_iter_progress(det_results_list)):

        image_name = det_results[0]['image_name']

        pose_results = inference_interhand_3d_model(pose_model,
                                                    image_name,
                                                    det_results,
                                                    dataset=dataset)

        # Post processing
        pose_results_vis = []
        for idx, res in enumerate(pose_results):
            keypoints_3d = res['keypoints_3d']
            # normalize kpt score
            if keypoints_3d[:, 3].max() > 1:
                keypoints_3d[:, 3] /= 255
            # get 2D keypoints in pixel space
            res['keypoints'] = keypoints_3d[:, [0, 1, 3]]

            # For model-predicted keypoints, channel 0 and 1 are coordinates
            # in pixel space, and channel 2 is the depth (in mm) relative
            # to root joints.
            # If both camera parameter and absolute depth of root joints are
            # provided, we can transform keypoint to camera space for better
            # visualization.
            camera_param = res['camera_param']
            keypoints_3d_gt = res['keypoints_3d_gt']
            if camera_param is not None and keypoints_3d_gt is not None:
                # build camera model
                camera = SimpleCamera(camera_param)
                # transform gt joints from world space to camera space
                keypoints_3d_gt[:, :3] = camera.world_to_camera(
                    keypoints_3d_gt[:, :3])

                # transform relative depth to absolute depth
                keypoints_3d[:21, 2] += keypoints_3d_gt[20, 2]
                keypoints_3d[21:, 2] += keypoints_3d_gt[41, 2]

                # transform keypoints from pixel space to camera space
                keypoints_3d[:, :3] = camera.pixel_to_camera(
                    keypoints_3d[:, :3])

            # rotate the keypoint to make z-axis correspondent to height
            # for better visualization
            vis_R = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
            keypoints_3d[:, :3] = keypoints_3d[:, :3] @ vis_R
            if keypoints_3d_gt is not None:
                keypoints_3d_gt[:, :3] = keypoints_3d_gt[:, :3] @ vis_R

            # rebase height (z-axis)
            if args.rebase_keypoint_height:
                valid = keypoints_3d[..., 3] > 0
                keypoints_3d[..., 2] -= np.min(keypoints_3d[valid, 2],
                                               axis=-1,
                                               keepdims=True)
            res['keypoints_3d'] = keypoints_3d
            res['keypoints_3d_gt'] = keypoints_3d_gt

            # Add title
            instance_id = res.get('track_id', idx)
            res['title'] = f'Prediction ({instance_id})'
            pose_results_vis.append(res)
            # Add ground truth
            if args.show_ground_truth:
                if keypoints_3d_gt is None:
                    print('Failed to show ground truth. Please make sure that'
                          ' gt-joints-file is provided.')
                else:
                    gt = res.copy()
                    if args.rebase_keypoint_height:
                        valid = keypoints_3d_gt[..., 3] > 0
                        keypoints_3d_gt[..., 2] -= np.min(
                            keypoints_3d_gt[valid, 2], axis=-1, keepdims=True)
                    gt['keypoints_3d'] = keypoints_3d_gt
                    gt['title'] = f'Ground truth ({instance_id})'
                    pose_results_vis.append(gt)

        # Visualization
        if args.out_img_root is None:
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = osp.join(args.out_img_root, f'vis_{i}.jpg')

        vis_3d_pose_result(
            pose_model,
            result=pose_results_vis,
            img=det_results[0]['image_name'],
            out_file=out_file,
            dataset=dataset,
            show=args.show,
            kpt_score_thr=args.kpt_thr,
            radius=args.radius,
            thickness=args.thickness,
        )
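The vis_R matrix above maps a point (x, y, z) to (x, z, -y): the camera depth axis becomes the second plotting axis and the negated image-down axis becomes +z, matching the in-code comment that the z-axis should correspond to height. A quick numeric check:

import numpy as np

vis_R = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
print(np.array([1.0, 2.0, 3.0]) @ vis_R)  # [ 1.  3. -2.], i.e. (x, z, -y)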