# Example 1
def test_bottom_up_demo():
    """Smoke-test bottom-up pose inference and visualization on a COCO image.

    Builds an associative-embedding model without checkpoint weights, runs
    inference on a sample image, visualizes the result, then repeats the
    inference with a dataset_info whose 'sigmas' entry has been removed.
    """
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/associative_embedding/'
        'coco/res50_coco_512x512.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'
    dataset_info = DatasetInfo(pose_model.cfg.data['test'].get(
        'dataset_info', None))

    pose_results, _ = inference_bottom_up_pose_model(pose_model,
                                                     image_name,
                                                     dataset_info=dataset_info)

    # show the results
    vis_pose_result(pose_model,
                    image_name,
                    pose_results,
                    dataset_info=dataset_info)

    # test dataset_info without sigmas
    pose_model_copy = copy.deepcopy(pose_model)

    pose_model_copy.cfg.data.test.dataset_info.pop('sigmas')
    # Rebuild DatasetInfo from the modified config: the original code passed
    # the unmodified `dataset_info`, which still contained 'sigmas' and
    # therefore never exercised the "without sigmas" code path this test
    # claims to cover.
    dataset_info_copy = DatasetInfo(pose_model_copy.cfg.data['test'].get(
        'dataset_info', None))
    pose_results, _ = inference_bottom_up_pose_model(
        pose_model_copy, image_name, dataset_info=dataset_info_copy)
# Example 2
def test_bottom_up_pose_tracking_demo():
    """Run bottom-up inference on a COCO image and track person ids."""
    # COCO demo: build the pose model from a config file, no checkpoint
    model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/associative_embedding/'
        'coco/res50_coco_512x512.py',
        None,
        device='cpu')

    img = 'tests/data/coco/000000000785.jpg'

    results, _ = inference_bottom_up_pose_model(model, img)

    # first frame: there is no history, so ids start from 0
    results, next_id = get_track_id(results, [], next_id=0)

    # visualize the tracked results
    vis_pose_tracking_result(model,
                             img,
                             results,
                             dataset='BottomUpCocoDataset')

    prev_results = results

    # match against the previous frame using OKS similarity
    results, next_id = get_track_id(results,
                                    prev_results,
                                    next_id=next_id,
                                    use_oks=True)

    prev_results = results
    # smooth keypoints with the one-euro filter while tracking
    results, next_id = get_track_id(results,
                                    prev_results,
                                    next_id=next_id,
                                    use_one_euro=True)
# Example 3
def main(args):
    """Compare native-API pose inference with a torchserve deployment.

    Runs inference on ``args.img`` twice -- once through the locally built
    model and once through a torchserve endpoint -- and writes both
    visualizations into ``args.out_dir``.
    """
    os.makedirs(args.out_dir, exist_ok=True)

    # Inference single image by native apis.
    pose_model = init_pose_model(args.config,
                                 args.checkpoint,
                                 device=args.device)
    if isinstance(pose_model, TopDown):
        native_result, _ = inference_top_down_pose_model(pose_model,
                                                         args.img,
                                                         person_results=None)
    elif isinstance(pose_model, (AssociativeEmbedding, )):
        native_result, _ = inference_bottom_up_pose_model(
            pose_model, args.img)
    else:
        # only top-down and associative-embedding models are supported
        raise NotImplementedError()

    local_out = osp.join(args.out_dir, 'pytorch_result.png')
    vis_pose_result(pose_model, args.img, native_result, out_file=local_out)

    # Inference single image by torchserve engine.
    endpoint = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.img, 'rb') as img_file:
        resp = requests.post(endpoint, img_file)
    remote_result = resp.json()

    remote_out = osp.join(args.out_dir, 'torchserve_result.png')
    vis_pose_result(pose_model, args.img, remote_result, out_file=remote_out)
# Example 4
def test_bottom_up_demo():
    """Bottom-up inference smoke test using the legacy config layout."""
    # build the pose model from a config file (no checkpoint weights)
    model = init_pose_model(
        'configs/bottom_up/resnet/coco/res50_coco_512x512.py',
        None,
        device='cpu')

    img = 'tests/data/coco/000000000785.jpg'

    results, _ = inference_bottom_up_pose_model(model, img)

    # show the results
    vis_pose_result(model, img, results, dataset='BottomUpCocoDataset')
# Example 5
def test_bottom_up_demo():
    """Bottom-up inference smoke test with an explicit drawing skeleton."""
    # COCO 17-keypoint limb connections (1-based indices) used for drawing
    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/bottom_up/resnet/coco/res50_coco_512x512.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'

    # inference_bottom_up_pose_model returns (pose_results, outputs); the
    # original assigned the whole tuple to pose_results, which every other
    # call site in this file unpacks, so vis_pose_result would have
    # received a tuple instead of the result list.
    pose_results, _ = inference_bottom_up_pose_model(pose_model, image_name)

    # show the results
    vis_pose_result(pose_model, image_name, pose_results, skeleton=skeleton)
# Example 6
def test_bottom_up_pose_tracking_demo():
    """Track person ids across repeated bottom-up inference on one image.

    Also checks that one-euro smoothing in get_track_id emits a
    DeprecationWarning.
    """
    # COCO demo: build the pose model from a config file, no checkpoint
    model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/associative_embedding/'
        'coco/res50_coco_512x512.py',
        None,
        device='cpu')

    img = 'tests/data/coco/000000000785.jpg'
    meta = DatasetInfo(model.cfg.data['test']['dataset_info'])

    results, _ = inference_bottom_up_pose_model(model,
                                                img,
                                                dataset_info=meta)

    # first frame: no history, ids start from 0
    results, next_id = get_track_id(results, [], next_id=0)

    # visualize the tracked results
    vis_pose_tracking_result(model, img, results, dataset_info=meta)

    prev = results

    # match against the previous frame using OKS similarity
    results, next_id = get_track_id(results,
                                    prev,
                                    next_id=next_id,
                                    use_oks=True)

    prev = results

    # one_euro smoothing is deprecated and must raise a warning
    with pytest.deprecated_call():
        results, next_id = get_track_id(results,
                                        prev,
                                        next_id=next_id,
                                        use_one_euro=True)
def get_pose(
        img,
        result_path,
        pose_config='./mobilenetv2_coco_512x512.py',
        pose_checkpoint='./mobilenetv2_coco_512x512-4d96e309_20200816.pth',
        device='cpu',
        kpt_thr=0.5):
    """Run bottom-up pose estimation on an image file and save the overlay.

    Args:
        img: path to the input image file.
        result_path: path where the visualization is written.
        pose_config: model config file.
        pose_checkpoint: model checkpoint file.
        device: inference device string, lower-cased before use.
        kpt_thr: keypoint score threshold for drawing.

    Returns:
        A single-element list ``[{"url": result_path}]``.
    """
    # build the pose model from a config file and a checkpoint file
    model = init_pose_model(pose_config,
                            pose_checkpoint,
                            device=device.lower())

    dataset = model.cfg.data['test']['type']
    assert (dataset == 'BottomUpCocoDataset')

    frame = cv2.imread(img)

    # no heatmaps and no intermediate layer outputs are requested
    results, _ = inference_bottom_up_pose_model(model,
                                                frame,
                                                return_heatmap=False,
                                                outputs=None)

    # draw the results and write them to result_path
    canvas = vis_pose_result(model,
                             frame,
                             results,
                             dataset=dataset,
                             kpt_score_thr=kpt_thr,
                             show=False)
    cv2.imwrite(result_path, canvas)

    return [{"url": result_path}]
def main():
    """Repeatedly run bottom-up pose estimation on one image and display it.

    Reads the image at ``args.img_root`` in a loop (placeholder for a
    camera feed, see the commented-out read), overlays the predicted
    keypoints, and shows the result until Esc/q/Q is pressed.
    """
    args = parse_args()

    # build the pose model from a config file and a checkpoint file
    # (the original also created an unused `torch.device` local; removed)
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())
    # optional
    return_heatmap = False
    dataset = pose_model.cfg.data['test']['type']
    assert (dataset == 'BottomUpCocoDataset')

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None
    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        # ret_val, img = camera.read()
        img = cv2.imread(args.img_root)

        pose_results, returned_outputs = inference_bottom_up_pose_model(
            pose_model,
            img,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)
        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)
        # display the frame before polling the keyboard; the original code
        # checked the key first, so each frame appeared one iteration late
        # and the final frame was never shown before quitting
        cv2.imshow('Image', vis_img)

        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break
# Example 9
def main():
    """Visualize bottom-up pose predictions for every frame of a video.

    Command-line entry point: loads a pose model, reads ``--video-path``
    frame by frame, runs bottom-up inference, and shows and/or writes the
    rendered frames (``--show`` / ``--out-video-root``).
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    # require at least one output: on-screen display or a saved video
    assert args.show or (args.out_video_root != '')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    dataset = pose_model.cfg.data['test']['type']
    assert (dataset == 'BottomUpCocoDataset')

    cap = cv2.VideoCapture(args.video_path)

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        # mirror the input video's fps and frame size in the output writer
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    while (cap.isOpened()):
        flag, img = cap.read()
        if not flag:
            break

        # inference_bottom_up_pose_model returns (pose_results, outputs);
        # the original kept the whole tuple, which every other call site in
        # this file unpacks, and vis_pose_result expects only the results.
        pose_results, _ = inference_bottom_up_pose_model(pose_model, img)

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()
# Example 10
def main():
    """Visualize bottom-up pose predictions for images listed in a COCO json.

    Command-line entry point: loads a pose model, iterates over the images
    referenced by ``--json-file`` (resolved under ``--img-root``), runs
    bottom-up inference on each one, and shows and/or saves the rendered
    keypoints (``--show`` / ``--out-img-root``).
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--json-file',
                        type=str,
                        default='',
                        help='Json file containing image info.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    # require at least one output: on-screen display or saved images
    assert args.show or (args.out_img_root != '')

    # COCO 17-keypoint limb connections (1-based indices) used for drawing
    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    coco = COCO(args.json_file)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    img_keys = list(coco.imgs.keys())

    # process each image
    for i in range(len(img_keys)):
        image_id = img_keys[i]
        image = coco.loadImgs(image_id)[0]
        image_name = os.path.join(args.img_root, image['file_name'])

        # test a single image, with a list of bboxes.
        # NOTE(review): other call sites in this file unpack a
        # (pose_results, outputs) tuple from this call — confirm this API
        # version really returns a bare result list here.
        pose_results = inference_bottom_up_pose_model(pose_model, image_name)

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        # show the results
        vis_pose_result(pose_model,
                        image_name,
                        pose_results,
                        skeleton=skeleton,
                        kpt_score_thr=args.kpt_thr,
                        show=args.show,
                        out_file=out_file)
def main():
    """Visualize pose tracking for every frame of a video.

    Command-line entry point: loads a bottom-up pose model, runs per-frame
    inference on ``--video-path``, assigns a track id to each detected
    person across frames (OKS or IoU matching, optional one-euro
    smoothing), and shows and/or writes the rendered frames.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.5,
                        help='Keypoint score threshold')
    parser.add_argument('--pose-nms-thr',
                        type=float,
                        default=0.9,
                        help='OKS threshold for pose NMS')
    parser.add_argument('--use-oks-tracking',
                        action='store_true',
                        help='Using OKS tracking')
    parser.add_argument('--tracking-thr',
                        type=float,
                        default=0.3,
                        help='Tracking threshold')
    parser.add_argument('--euro',
                        action='store_true',
                        help='Using One_Euro_Filter for smoothing')
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    args = parser.parse_args()

    # require at least one output: on-screen display or a saved video
    assert args.show or (args.out_video_root != '')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    assert (dataset == 'BottomUpCocoDataset')

    cap = cv2.VideoCapture(args.video_path)
    # fps stays None unless an output video is written; it is forwarded to
    # get_track_id below for the one-euro filter
    fps = None

    assert cap.isOpened(), f'Faild to load video file {args.video_path}'

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        # mirror the input video's fps and frame size in the output writer
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None
    next_id = 0
    # previous-frame results are carried across iterations for id matching
    pose_results = []
    while (cap.isOpened()):
        flag, img = cap.read()
        if not flag:
            break
        pose_results_last = pose_results

        pose_results, returned_outputs = inference_bottom_up_pose_model(
            pose_model,
            img,
            pose_nms_thr=args.pose_nms_thr,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # get track id for each person instance
        pose_results, next_id = get_track_id(pose_results,
                                             pose_results_last,
                                             next_id,
                                             use_oks=args.use_oks_tracking,
                                             tracking_thr=args.tracking_thr,
                                             use_one_euro=args.euro,
                                             fps=fps)

        # show the results
        vis_img = vis_pose_tracking_result(pose_model,
                                           img,
                                           pose_results,
                                           radius=args.radius,
                                           thickness=args.thickness,
                                           dataset=dataset,
                                           kpt_score_thr=args.kpt_thr,
                                           show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()
# Example 12
def main():
    """Visualize pose tracking on a video with optional temporal smoothing.

    Command-line entry point: loads a bottom-up pose model, runs per-frame
    inference on ``--video-path``, assigns track ids across frames,
    optionally smooths the keypoints with a filter (``--smooth`` or the
    deprecated ``--euro``), and shows and/or writes the rendered frames.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.5,
                        help='Keypoint score threshold')
    parser.add_argument('--pose-nms-thr',
                        type=float,
                        default=0.9,
                        help='OKS threshold for pose NMS')
    parser.add_argument('--use-oks-tracking',
                        action='store_true',
                        help='Using OKS tracking')
    parser.add_argument('--tracking-thr',
                        type=float,
                        default=0.3,
                        help='Tracking threshold')
    parser.add_argument(
        '--euro',
        action='store_true',
        help='(Deprecated, please use --smooth and --smooth-filter-cfg) '
        'Using One_Euro_Filter for smoothing.')
    parser.add_argument(
        '--smooth',
        action='store_true',
        help='Apply a temporal filter to smooth the pose estimation results. '
        'See also --smooth-filter-cfg.')
    parser.add_argument(
        '--smooth-filter-cfg',
        type=str,
        default='configs/_base_/filters/one_euro.py',
        help='Config file of the filter to smooth the pose estimation '
        'results. See also --smooth.')
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    args = parser.parse_args()

    # require at least one output: on-screen display or a saved video
    assert args.show or (args.out_video_root != '')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config.'
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
        assert (dataset == 'BottomUpCocoDataset')
    else:
        dataset_info = DatasetInfo(dataset_info)

    video = mmcv.VideoReader(args.video_path)
    assert video.opened, f'Failed to load video file {args.video_path}'

    # Bind fps up front: it is passed to get_track_id(..., fps=fps) below,
    # but was previously assigned only inside the save_out_video branch, so
    # running with --show and no --out-video-root raised NameError.
    fps = None

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        # mirror the input video's fps and frame size in the output writer
        fps = video.fps
        size = (video.width, video.height)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # build pose smoother for temporal refinement
    if args.euro:
        warnings.warn(
            'Argument --euro will be deprecated in the future. '
            'Please use --smooth to enable temporal smoothing, and '
            '--smooth-filter-cfg to set the filter config.',
            DeprecationWarning)
        smoother = Smoother(filter_cfg='configs/_base_/filters/one_euro.py',
                            keypoint_dim=2)
    elif args.smooth:
        smoother = Smoother(filter_cfg=args.smooth_filter_cfg, keypoint_dim=2)
    else:
        smoother = None

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None
    next_id = 0
    # previous-frame results are carried across iterations for id matching
    pose_results = []
    for cur_frame in mmcv.track_iter_progress(video):
        pose_results_last = pose_results

        pose_results, _ = inference_bottom_up_pose_model(
            pose_model,
            cur_frame,
            dataset=dataset,
            dataset_info=dataset_info,
            pose_nms_thr=args.pose_nms_thr,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # get track id for each person instance
        pose_results, next_id = get_track_id(pose_results,
                                             pose_results_last,
                                             next_id,
                                             use_oks=args.use_oks_tracking,
                                             tracking_thr=args.tracking_thr,
                                             use_one_euro=args.euro,
                                             fps=fps)

        # post-process the pose results with smoother
        if smoother:
            pose_results = smoother.smooth(pose_results)

        # show the results
        vis_frame = vis_pose_tracking_result(pose_model,
                                             cur_frame,
                                             pose_results,
                                             radius=args.radius,
                                             thickness=args.thickness,
                                             dataset=dataset,
                                             dataset_info=dataset_info,
                                             kpt_score_thr=args.kpt_thr,
                                             show=False)

        if args.show:
            cv2.imshow('Image', vis_frame)

        if save_out_video:
            videoWriter.write(vis_frame)

        if args.show and cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if save_out_video:
        videoWriter.release()
    if args.show:
        cv2.destroyAllWindows()
# Example 13
 def _inference_bottom_up_pose_model(self, data):
     """Run bottom-up pose inference on each image in ``data``.

     Returns a list with one prediction list per input image; the second
     element returned by the inference call is discarded.
     """
     return [
         inference_bottom_up_pose_model(self.model, image)[0]
         for image in data
     ]
# Example 14
def main():
    """Visualize bottom-up pose predictions for an image file or folder.

    Command-line entry point: collects the image(s) at ``--img-path``,
    loads a pose model, runs bottom-up inference on each image, and shows
    and/or saves the rendered keypoints (``--show`` / ``--out-img-root``).
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--img-path',
        type=str,
        help='Path to an image file or a image folder.')
    parser.add_argument(
        '--show',
        action='store_true',
        default=False,
        help='whether to show img')
    parser.add_argument(
        '--out-img-root',
        type=str,
        default='',
        help='Root of the output img file. '
        'Default not saving the visualization images.')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
    parser.add_argument(
        '--pose-nms-thr',
        type=float,
        default=0.9,
        help='OKS threshold for pose NMS')
    parser.add_argument(
        '--radius',
        type=int,
        default=4,
        help='Keypoint radius for visualization')
    parser.add_argument(
        '--thickness',
        type=int,
        default=1,
        help='Link thickness for visualization')

    args = parser.parse_args()

    # require at least one output: on-screen display or saved images
    assert args.show or (args.out_img_root != '')

    # prepare image list: a single file, or every image file in a folder
    if osp.isfile(args.img_path):
        image_list = [args.img_path]
    elif osp.isdir(args.img_path):
        image_list = [
            osp.join(args.img_path, fn) for fn in os.listdir(args.img_path)
            if fn.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp'))
        ]
    else:
        raise ValueError('Image path should be an image or image folder.'
                         f'Got invalid image path: {args.img_path}')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        args.pose_config, args.pose_checkpoint, device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    # configs without `dataset_info` are a deprecated legacy format
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config.'
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
        assert (dataset == 'BottomUpCocoDataset')
    else:
        dataset_info = DatasetInfo(dataset_info)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    # process each image
    for image_name in mmcv.track_iter_progress(image_list):

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_bottom_up_pose_model(
            pose_model,
            image_name,
            dataset=dataset,
            dataset_info=dataset_info,
            pose_nms_thr=args.pose_nms_thr,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            # name the output after the input image's basename
            out_file = os.path.join(
                args.out_img_root,
                f'vis_{osp.splitext(osp.basename(image_name))[0]}.jpg')

        # show the results
        vis_pose_result(
            pose_model,
            image_name,
            pose_results,
            radius=args.radius,
            thickness=args.thickness,
            dataset=dataset,
            dataset_info=dataset_info,
            kpt_score_thr=args.kpt_thr,
            show=args.show,
            out_file=out_file)
# Example 15
def test_inference_without_dataset_info():
    """Check the legacy (pre-``dataset_info``) code paths of the inference
    and visualization APIs.

    For each pipeline (top-down, bottom-up, tracking, 3D pose lifting) the
    ``dataset_info`` entry is removed from the model config, and the test
    asserts that:

    * inference/visualization still work but emit a ``DeprecationWarning``;
    * passing an unknown legacy ``dataset`` string raises
      ``NotImplementedError``.

    NOTE: statement order matters — each section pops ``dataset_info``
    from a freshly built model before calling the API under test.
    """
    # Top down: build a top-down model and strip dataset_info from its cfg.
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/'
        'coco/res50_coco_256x192.py',
        None,
        device='cpu')

    if 'dataset_info' in pose_model.cfg:
        _ = pose_model.cfg.pop('dataset_info')

    image_name = 'tests/data/coco/000000000785.jpg'
    person_result = []
    # bbox in xywh format, matching format='xywh' below
    person_result.append({'bbox': [50, 50, 50, 100]})

    # legacy path must still run, but warn about the missing dataset_info
    with pytest.warns(DeprecationWarning):
        pose_results, _ = inference_top_down_pose_model(pose_model,
                                                        image_name,
                                                        person_result,
                                                        format='xywh')

    with pytest.warns(DeprecationWarning):
        vis_pose_result(pose_model, image_name, pose_results)

    # an unrecognized legacy dataset name is rejected
    with pytest.raises(NotImplementedError):
        with pytest.warns(DeprecationWarning):
            pose_results, _ = inference_top_down_pose_model(pose_model,
                                                            image_name,
                                                            person_result,
                                                            format='xywh',
                                                            dataset='test')

    # Bottom up: same checks for the associative-embedding model.
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/associative_embedding/'
        'coco/res50_coco_512x512.py',
        None,
        device='cpu')
    if 'dataset_info' in pose_model.cfg:
        _ = pose_model.cfg.pop('dataset_info')

    image_name = 'tests/data/coco/000000000785.jpg'

    with pytest.warns(DeprecationWarning):
        pose_results, _ = inference_bottom_up_pose_model(
            pose_model, image_name)
    with pytest.warns(DeprecationWarning):
        vis_pose_result(pose_model, image_name, pose_results)

    # Top down tracking: legacy path through get_track_id + tracking vis.
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/'
        'coco/res50_coco_256x192.py',
        None,
        device='cpu')

    if 'dataset_info' in pose_model.cfg:
        _ = pose_model.cfg.pop('dataset_info')

    image_name = 'tests/data/coco/000000000785.jpg'
    person_result = [{'bbox': [50, 50, 50, 100]}]

    with pytest.warns(DeprecationWarning):
        pose_results, _ = inference_top_down_pose_model(pose_model,
                                                        image_name,
                                                        person_result,
                                                        format='xywh')

    # assign track ids starting from 0 (empty history)
    pose_results, _ = get_track_id(pose_results, [], next_id=0)

    with pytest.warns(DeprecationWarning):
        vis_pose_tracking_result(pose_model, image_name, pose_results)

    # unknown legacy dataset name is rejected for tracking vis too
    with pytest.raises(NotImplementedError):
        with pytest.warns(DeprecationWarning):
            vis_pose_tracking_result(pose_model,
                                     image_name,
                                     pose_results,
                                     dataset='test')

    # Bottom up tracking
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/associative_embedding/'
        'coco/res50_coco_512x512.py',
        None,
        device='cpu')

    if 'dataset_info' in pose_model.cfg:
        _ = pose_model.cfg.pop('dataset_info')

    image_name = 'tests/data/coco/000000000785.jpg'
    with pytest.warns(DeprecationWarning):
        pose_results, _ = inference_bottom_up_pose_model(
            pose_model, image_name)

    pose_results, next_id = get_track_id(pose_results, [], next_id=0)

    with pytest.warns(DeprecationWarning):
        vis_pose_tracking_result(pose_model,
                                 image_name,
                                 pose_results,
                                 dataset='BottomUpCocoDataset')

    # Pose lifting: 2D->3D lifting with a synthetic 17-joint detection.
    pose_model = init_pose_model(
        'configs/body/3d_kpt_sview_rgb_img/pose_lift/'
        'h36m/simplebaseline3d_h36m.py',
        None,
        device='cpu')

    pose_det_result = {
        'keypoints': np.zeros((17, 3)),
        'bbox': [50, 50, 50, 50],
        'track_id': 0,
        'image_name': 'tests/data/h36m/S1_Directions_1.54138969_000001.jpg',
    }

    if 'dataset_info' in pose_model.cfg:
        _ = pose_model.cfg.pop('dataset_info')

    # one frame, one person
    pose_results_2d = [[pose_det_result]]

    dataset = pose_model.cfg.data['test']['type']

    pose_results_2d = extract_pose_sequence(pose_results_2d,
                                            frame_idx=0,
                                            causal=False,
                                            seq_len=1,
                                            step=1)

    with pytest.warns(DeprecationWarning):
        _ = inference_pose_lifter_model(pose_model,
                                        pose_results_2d,
                                        dataset,
                                        with_track_id=False)

    with pytest.warns(DeprecationWarning):
        pose_lift_results = inference_pose_lifter_model(pose_model,
                                                        pose_results_2d,
                                                        dataset,
                                                        with_track_id=True)

    for res in pose_lift_results:
        res['title'] = 'title'
    with pytest.warns(DeprecationWarning):
        vis_3d_pose_result(pose_model,
                           pose_lift_results,
                           img=pose_results_2d[0][0]['image_name'],
                           dataset=dataset)

    # unknown legacy dataset name is rejected for the lifter as well
    with pytest.raises(NotImplementedError):
        with pytest.warns(DeprecationWarning):
            _ = inference_pose_lifter_model(pose_model,
                                            pose_results_2d,
                                            dataset='test')
Ejemplo n.º 16
0
def main():
    """Run bottom-up pose estimation on a video and visualize the results.

    Reads frames from ``--video-path``, runs the bottom-up pose model on
    each frame, and either displays the overlay (``--show``) and/or writes
    it to an mp4 file under ``--out-video-root``.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--pose-nms-thr',
                        type=float,
                        default=0.9,
                        help='OKS threshold for pose NMS')
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    args = parser.parse_args()

    # The run must produce some output: on-screen display or a saved video.
    assert args.show or (args.out_video_root != ''), \
        'Use --show or set --out-video-root to produce output.'

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        # fixed: missing space between the two concatenated sentences
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
        # legacy configs without dataset_info are only supported for COCO
        assert dataset == 'BottomUpCocoDataset'
    else:
        dataset_info = DatasetInfo(dataset_info)

    # read video
    video = mmcv.VideoReader(args.video_path)
    assert video.opened, f'Failed to load video file {args.video_path}'

    save_out_video = args.out_video_root != ''
    if save_out_video:
        os.makedirs(args.out_video_root, exist_ok=True)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            video.fps, (video.width, video.height))

    # whether to return heatmaps alongside the predicted keypoints
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    print('Running inference...')
    for cur_frame in mmcv.track_iter_progress(video):
        pose_results, _ = inference_bottom_up_pose_model(
            pose_model,
            cur_frame,
            dataset=dataset,
            dataset_info=dataset_info,
            pose_nms_thr=args.pose_nms_thr,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # draw the predicted poses on the frame
        vis_frame = vis_pose_result(pose_model,
                                    cur_frame,
                                    pose_results,
                                    radius=args.radius,
                                    thickness=args.thickness,
                                    dataset=dataset,
                                    dataset_info=dataset_info,
                                    kpt_score_thr=args.kpt_thr,
                                    show=False)

        if args.show:
            cv2.imshow('Image', vis_frame)

        if save_out_video:
            videoWriter.write(vis_frame)

        # allow early exit with 'q' while displaying
        if args.show and cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if save_out_video:
        videoWriter.release()
    if args.show:
        cv2.destroyAllWindows()
Ejemplo n.º 17
0
def main():
    """Visualize bottom-up pose results for images listed in a COCO json.

    Loads image metadata from ``--json-file``, runs the bottom-up pose
    model on each image under ``--img-root``, and shows and/or saves the
    visualizations.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--json-file',
                        type=str,
                        default='',
                        help='Json file containing image info.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    # The run must produce some output: on-screen display or saved images.
    assert args.show or (args.out_img_root != ''), \
        'Use --show or set --out-img-root to produce output.'

    coco = COCO(args.json_file)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    dataset = pose_model.cfg.data['test']['type']
    # this demo only supports the bottom-up COCO pipeline
    assert dataset == 'BottomUpCocoDataset'

    img_keys = list(coco.imgs.keys())

    # whether to return heatmaps alongside the predicted keypoints
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    # process each image (enumerate replaces the range(len(...)) idiom)
    for i, image_id in enumerate(img_keys):
        image = coco.loadImgs(image_id)[0]
        image_name = os.path.join(args.img_root, image['file_name'])

        # bottom-up inference needs no bounding boxes
        pose_results, returned_outputs = inference_bottom_up_pose_model(
            pose_model,
            image_name,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        # show the results
        vis_pose_result(pose_model,
                        image_name,
                        pose_results,
                        dataset=dataset,
                        kpt_score_thr=args.kpt_thr,
                        show=args.show,
                        out_file=out_file)
Ejemplo n.º 18
0
def main():
    """Visualize the demo images.

    Runs bottom-up pose estimation on a video. Only every
    ``skip_ratio``-th frame is analysed; the remaining frames reuse the
    previous pose so the overlay stays continuous. When saving output,
    the frame-normalized keypoint coordinates are also dumped to a
    ``*-bottom.npy`` array next to the output video.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cpu',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    # The run must produce some output: on-screen display or a saved video.
    assert args.show or (args.out_video_root != '')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)
    print('loaded pose model')  # fixed typo: was 'poes'

    dataset = pose_model.cfg.data['test']['type']
    # this demo only supports the bottom-up COCO pipeline
    assert dataset == 'BottomUpCocoDataset'

    print(dataset)

    cap = cv2.VideoCapture(args.video_path)

    print('loaded video')

    frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True
        print('save path: {0}'.format(args.out_video_root))

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}').replace(
                             '.mp4', '-bottom.mp4'), fourcc, fps, size)

    # per-frame storage: normalized x, y (and an unused third slot)
    print(pose_model.cfg.channel_cfg['dataset_joints'])
    poses = np.zeros((frames, pose_model.cfg.channel_cfg['dataset_joints'], 3))

    print(poses.shape)

    frame = 0
    t0 = time.perf_counter()
    # last successful result, reused for skipped / detection-less frames;
    # fixed: was 0, which would crash vis_pose_result if the very first
    # analysed frame had no detections
    prev_pose = []

    # named constants instead of the magic indices 3 / 4
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

    print('width: {0}, height: {1}'.format(width, height))

    # analyse only every `skip_ratio`-th frame for speed
    skip_ratio = 4

    # fixed region of interest (x1, y1, x2, y2, score); leftover from a
    # removed detector-based path, currently only printed for reference
    person_bboxes = [[
        2 * width / 10, height / 8, 0.9 * width, 7 * height / 8, 1
    ]]
    print(person_bboxes)
    while cap.isOpened():
        t1 = time.perf_counter()
        flag, img = cap.read()

        if not flag:
            break

        if frame % skip_ratio == 0:
            # run bottom-up inference on this frame.
            # NOTE(review): newer mmpose versions return a
            # (pose_results, returned_outputs) tuple here — confirm the
            # installed API version before changing the unpacking.
            pose_results = inference_bottom_up_pose_model(pose_model, img)

            t = time.perf_counter()
            print('Frame {0} analysed in {1} secs. Total time: {2} secs\
                    '.format(frame, t - t1, t - t0))

            if np.shape(pose_results)[0] > 0:
                prev_pose = pose_results
                # store keypoints normalized to the frame size
                x_ratios = pose_results[0]['keypoints'][:, 0] / width
                y_ratios = pose_results[0]['keypoints'][:, 1] / height
                poses[frame, :, 0] = x_ratios
                poses[frame, :, 1] = y_ratios
                if frame == 0:
                    print(x_ratios)
            else:
                # nothing detected: fall back to the previous pose
                pose_results = prev_pose
                print('No pose detected; reusing previous result.')
        else:
            # skipped frame: reuse the last analysed pose
            pose_results = prev_pose

        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show or frame % skip_ratio == 0:
            cv2.imshow('Image', vis_img)
        frame += 1

        if save_out_video:
            videoWriter.write(vis_img)

        # allow early exit with 'q'
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
        # save the normalized keypoints next to the output video
        out_file = os.path.join(args.out_video_root,
                                os.path.basename(args.video_path)).replace(
                                    '.mp4', '-bottom.npy')
        np.save(out_file, poses)

    cv2.destroyAllWindows()