Example #1
def test_bottom_up_demo():

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/associative_embedding/'
        'coco/res50_coco_512x512.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'
    dataset_info = DatasetInfo(pose_model.cfg.data['test'].get(
        'dataset_info', None))

    pose_results, _ = inference_bottom_up_pose_model(pose_model,
                                                     image_name,
                                                     dataset_info=dataset_info)

    # show the results
    vis_pose_result(pose_model,
                    image_name,
                    pose_results,
                    dataset_info=dataset_info)

    # test dataset_info without sigmas
    pose_model_copy = copy.deepcopy(pose_model)

    pose_model_copy.cfg.data.test.dataset_info.pop('sigmas')
    pose_results, _ = inference_bottom_up_pose_model(pose_model_copy,
                                                     image_name,
                                                     dataset_info=dataset_info)
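
For reference, a minimal sketch (assuming the mmpose 0.x API used above; print_keypoints is a hypothetical helper) of how to consume the bottom-up results, which are a list of per-person dicts whose 'keypoints' entry is an (N, 3) array of (x, y, score) rows:

def print_keypoints(pose_results, kpt_thr=0.3):
    # pose_results: list of dicts, each with a 'keypoints' ndarray (N, 3)
    for person_id, person in enumerate(pose_results):
        for i, (x, y, score) in enumerate(person['keypoints']):
            if score >= kpt_thr:
                print(f'person {person_id}, keypoint {i}: ({x:.1f}, {y:.1f})')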
Example #2
def main(args):
    os.makedirs(args.out_dir, exist_ok=True)

    # Inference single image by native apis.
    model = init_pose_model(args.config, args.checkpoint, device=args.device)
    if isinstance(model, TopDown):
        pytorch_result, _ = inference_top_down_pose_model(model,
                                                          args.img,
                                                          person_results=None)
    elif isinstance(model, (AssociativeEmbedding, )):
        pytorch_result, _ = inference_bottom_up_pose_model(model, args.img)
    else:
        raise NotImplementedError()

    vis_pose_result(model,
                    args.img,
                    pytorch_result,
                    out_file=osp.join(args.out_dir, 'pytorch_result.png'))

    # Inference single image by torchserve engine.
    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.img, 'rb') as image:
        response = requests.post(url, data=image)
    server_result = response.json()

    vis_pose_result(model,
                    args.img,
                    server_result,
                    out_file=osp.join(args.out_dir, 'torchserve_result.png'))
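
The TorchServe call above posts the raw image bytes and decodes the JSON reply directly; a slightly more defensive variant (same requests API, assuming args as defined in the example) checks the HTTP status first:

    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.img, 'rb') as image:
        response = requests.post(url, data=image)
    response.raise_for_status()  # surface 4xx/5xx errors before decoding
    server_result = response.json()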
Example #3
def test_bottom_up_demo():

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/bottom_up/resnet/coco/res50_coco_512x512.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'

    pose_results, _ = inference_bottom_up_pose_model(pose_model, image_name)

    # show the results
    vis_pose_result(
        pose_model, image_name, pose_results, dataset='BottomUpCocoDataset')
Example #4
def test_bottom_up_demo():
    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/bottom_up/resnet/coco/res50_coco_512x512.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'

    pose_results = inference_bottom_up_pose_model(pose_model, image_name)

    # show the results
    vis_pose_result(pose_model, image_name, pose_results, skeleton=skeleton)
Example #5
def test_top_down_demo():
    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/top_down/resnet/coco/res50_coco_256x192.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'
    # test a single image, with a list of bboxes.
    pose_results = inference_top_down_pose_model(
        pose_model, image_name, [[50, 50, 50, 100]], format='xywh')

    # show the results
    vis_pose_result(pose_model, image_name, pose_results, skeleton=skeleton)
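
The examples in this collection mix the 'xywh' and 'xyxy' bbox formats; a small self-contained sketch (illustrative helper names) of the conversion between them:

def xywh2xyxy(bbox):
    # (left, top, width, height) -> (x1, y1, x2, y2)
    x, y, w, h = bbox[:4]
    return [x, y, x + w, y + h]

def xyxy2xywh(bbox):
    # (x1, y1, x2, y2) -> (left, top, width, height)
    x1, y1, x2, y2 = bbox[:4]
    return [x1, y1, x2 - x1, y2 - y1]

assert xywh2xyxy([50, 50, 50, 100]) == [50, 50, 100, 150]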
Example #6
def inference(detector,
              model,
              img,
              vis=False,
              bbox_thr=0.3,
              kpt_thr=0.3,
              dataset='TopDownCocoDataset',
              format='xyxy',
              return_heatmap=False,
              **kwargs):
    import torch as th
    from ml import cv
    from ml.vision.ops import dets_select
    # from xtcocotools.coco import COCO
    from mmpose.apis import (inference_top_down_pose_model, vis_pose_result)
    from mmpose.datasets import DatasetInfo

    model.to('cuda:0')
    model.eval()
    # result = model(return_loss=return_loss, **data)

    fp16 = kwargs.get('fp16', False)
    with th.cuda.amp.autocast(enabled=fp16):
        dets = detector.detect(img, size=640, conf_thres=0.4, iou_thres=0.5)
    persons = dets_select(dets, [0])
    ppls = [
        dets_f[persons_f].cpu().numpy()
        for dets_f, persons_f in zip(dets, persons)
    ]
    """
    Args:
        person_results(List[Tensor(N, 5)]): bboxes per class in order with scores
    """
    # print(ppls)
    person_results = [dict(bbox=ppl[:-1]) for ppl in ppls[0]]
    # print(person_results)
    pose_results, returned_outputs = inference_top_down_pose_model(
        model,
        img,
        person_results,
        bbox_thr=bbox_thr,
        format=format,
        dataset=dataset,
        # dataset_info=DatasetInfo({'dataset_name': dataset, 'flip_pairs': []}),
        return_heatmap=return_heatmap,
        outputs=None)
    if vis:
        img = cv.imread(img)
        vis_img = vis_pose_result(model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=kpt_thr,
                                  show=False)
        return pose_results, vis_img
    return pose_results
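
The person_results list built above is just a list of dicts with a 'bbox' key; a self-contained sketch (assuming detections arrive as an (N, 5) array of x1, y1, x2, y2, score) of the same conversion:

import numpy as np

def to_person_results(dets):
    # dets: ndarray of shape (N, 5) with rows (x1, y1, x2, y2, score)
    return [dict(bbox=det) for det in dets]

person_results = to_person_results(np.array([[50., 50., 100., 150., 0.9]]))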
Example #7
def test_top_down_demo():
    # COCO demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/top_down/resnet/coco/res50_coco_256x192.py',
        None,
        device='cpu')
    image_name = 'tests/data/coco/000000000785.jpg'

    person_result = []
    person_result.append({'bbox': [50, 50, 50, 100]})
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model, image_name, person_result, format='xywh')
    # show the results
    vis_pose_result(pose_model, image_name, pose_results)

    # AIC demo
    pose_model = init_pose_model(
        'configs/top_down/resnet/aic/res50_aic_256x192.py', None, device='cpu')
    image_name = 'tests/data/aic/054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name,
        person_result,
        format='xywh',
        dataset='TopDownAicDataset')
    # show the results
    vis_pose_result(
        pose_model, image_name, pose_results, dataset='TopDownAicDataset')

    # OneHand10K demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/hand/resnet/onehand10k/res50_onehand10k_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/onehand10k/9.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name,
        person_result,
        format='xywh',
        dataset='OneHand10KDataset')
    # show the results
    vis_pose_result(
        pose_model, image_name, pose_results, dataset='OneHand10KDataset')

    with pytest.raises(NotImplementedError):
        pose_results, _ = inference_top_down_pose_model(
            pose_model,
            image_name,
            person_result,
            format='xywh',
            dataset='test')
Example #8
def __render_mmp(self, img):
    if self.render_mmp and self.last_pose_results:
        img = vis_pose_result(self.pose_model,
                              img,
                              self.last_pose_results,
                              dataset=self.dataset,
                              kpt_score_thr=self.kpt_thr,
                              show=False)
        for result in self.last_pose_results:
            kp = result['keypoints']
            for i, p in enumerate(kp):
                if i > 16:
                    break
                x = int(p[0])
                y = int(p[1])
                cv2.putText(img, "{}".format(i), (x, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255),
                            1, cv2.LINE_AA)
    return img
Example #9
def det_posestim(det_model, img, pose_model, args, dataset):
    det_results = inference_detector(det_model, img)

    person_bboxes = det_results[0].copy()

    pose_results = inference_top_down_pose_model(pose_model,
                                                 img,
                                                 person_bboxes,
                                                 bbox_thr=args.bbox_thr,
                                                 format='xyxy',
                                                 dataset=dataset)

    vis_img = vis_pose_result(pose_model,
                              img,
                              pose_results,
                              dataset=dataset,
                              kpt_score_thr=args.kpt_thr,
                              show=False)

    return vis_img, pose_results
Example #10
def get_pose(
        img,
        result_path,
        pose_config='./mobilenetv2_coco_512x512.py',
        pose_checkpoint='./mobilenetv2_coco_512x512-4d96e309_20200816.pth',
        device='cpu',
        kpt_thr=0.5):

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(pose_config,
                                 pose_checkpoint,
                                 device=device.lower())
    # optional
    return_heatmap = False
    dataset = pose_model.cfg.data['test']['type']
    assert (dataset == 'BottomUpCocoDataset')

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None
    img = cv2.imread(img)

    pose_results, returned_outputs = inference_bottom_up_pose_model(
        pose_model,
        img,
        return_heatmap=return_heatmap,
        outputs=output_layer_names)
    # show the results
    vis_img = vis_pose_result(pose_model,
                              img,
                              pose_results,
                              dataset=dataset,
                              kpt_score_thr=kpt_thr,
                              show=False)
    cv2.imwrite(result_path, vis_img)

    sample0 = {"url": result_path}

    res_list = [sample0]

    return res_list
Example #11
def main():
    args = parse_args()

    device = torch.device(args.device)

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())
    # optional
    return_heatmap = False
    dataset = pose_model.cfg.data['test']['type']
    assert (dataset == 'BottomUpCocoDataset')

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None
    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        # ret_val, img = camera.read()
        img = cv2.imread(args.img_root)

        pose_results, returned_outputs = inference_bottom_up_pose_model(
            pose_model,
            img,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)
        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)
        cv2.imshow('Image', vis_img)

        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break
Example #12
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    assert has_mmdet, 'Please install mmdet to run the demo.'

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device.lower())
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']

    cap = cv2.VideoCapture(args.video_path)

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    sum_det_time = 0.0
    sum_pose_time = 0.0

    while (cap.isOpened()):
        flag, img = cap.read()
        if not flag:
            break

        det_start_time = time.time()
        # test a single image, the resulting box is (x1, y1, x2, y2)
        mmdet_results = inference_detector(det_model, img)

        # keep the person class bounding boxes.
        person_results = process_mmdet_results(mmdet_results)

        sum_det_time += (time.time() - det_start_time)

        pose_start_time = time.time()
        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            img,
            person_results,
            bbox_thr=args.bbox_thr,
            format='xyxy',
            dataset=dataset,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)
        sum_pose_time += (time.time() - pose_start_time)

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    print('Det time', sum_det_time, 'Pose time', sum_pose_time)

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()
Example #13
def main():
    """Visualize the demo images."""
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    dataset = pose_model.cfg.data['test']['type']
    assert (dataset == 'BottomUpCocoDataset')

    cap = cv2.VideoCapture(args.video_path)

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    while (cap.isOpened()):
        flag, img = cap.read()
        if not flag:
            break

        pose_results = inference_bottom_up_pose_model(pose_model, img)

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()
Example #14
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    # parser.add_argument('det_config', help='Config file for detection')
    # parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show', action='store_true', default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root', default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device', default='cpu',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr', type=float, default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr', type=float, default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--file_name', type=str, default='')
    parser.add_argument('--only_box', action='store_true')
    # parser.add_argument('--csv-path', type=str, help='CSV path')

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')
    # assert args.det_config is not None
    # assert args.det_checkpoint is not None

    # build the pose model from a config file and a checkpoint file

    pose_model = init_pose_model(args.pose_config, args.pose_checkpoint,
                                 device=args.device)
    print('loaded pose model')

    dataset = pose_model.cfg.data['test']['type']

    print(dataset)

    mod_used = pose_model.cfg.model['backbone']['type']

    print('model used {0}'.format(mod_used))

    cap = cv2.VideoCapture(args.video_path)
    print('loaded video...')
    print('checking orientation and position')

    flag, img = cap.read()
    cap.release()
    person_bboxes, flip = box_check(img)
    cap = cv2.VideoCapture(args.video_path)

    print(args.only_box)
    if args.only_box:
        # cv2.waitKey(0)
        return

    frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True
        print('save path: {0}'.format(args.out_video_root))

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        if flip:
            size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                    int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
        else:
            size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        m_dim = max(size)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        if args.file_name == '':
            fname = os.path.join(args.out_video_root,
                                 f'vis_{os.path.basename(args.video_path)}')
            # if os.path.basename(args.video_path).find()
            fname = fname.replace(fname[fname.find('.', -5)::], '')
            fname += mod_used + dataset + '.mp4'
            print('FN {0}'.format(fname))
            while os.path.isfile(fname):
                fname = fname.replace('.mp4', '')

                idx = fname.find('-', -4)
                if idx == -1:
                    fname += '-0.mp4'
                else:
                    fname = fname.replace(fname[idx + 1::],
                                          str(int(fname[idx + 1::])
                                              + 1) + '.mp4')
        else:
            fname = os.path.join(args.out_video_root, args.file_name)

        print(fname)
        videoWriter = cv2.VideoWriter(fname, fourcc, fps, size)

    print(pose_model.cfg.channel_cfg['num_output_channels'])
    poses = np.zeros((frames,
                      pose_model.cfg.channel_cfg['num_output_channels'], 3))
    # poses[-1, 0:2] = size
    print(poses.shape)

    frame = 0
    t0 = time.perf_counter()
    prev_pose = 0

    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

    print('width: {0}, height: {1}'.format(width, height))

    skip_ratio = 1

    # person_bboxes = [[2 * width / 10, height /
    #                   8, 0.9 * width, 7 * height / 8, 1]]

    # person_bboxes = [[2 * width / 10, height /
    #                   5, 0.9 * width, 4 * height / 5, 1]]
    # person_bboxes = [[2*width/10, 0, 0.9*width, height, 1]]
    # person_bboxes = [[3 * width / 10, 0, 0.6 * width, height, 1]]
    # person_bboxes = [[35 * width / 10, 0.1 *
    #                   height, 0.7 * width, 0.95 * height, 1]]
    print(person_bboxes)
    # rmin = np.ones(2)
    # rmax = np.zeros(2)
    # lmin = np.ones(2)
    # lmax = np.zeros(2)
    lmin = 1
    lmax = 0
    rmin = 1
    rmax = 0
    while (cap.isOpened()):
        t1 = time.perf_counter()
        flag, img = cap.read()
        if not flag:
            break
        if flip:
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)

        # analyse every skip_ratio-th frame
        if frame % skip_ratio == 0:
            # test a single image, the resulting box is (x1, y1, x2, y2)
            # det_results = inference_detector(det_model, img)
            # # keep the person class bounding boxes.
            #
            # person_bboxes = np.expand_dims(
            #     np.array(det_results[0])[0, :], axis=0)
            #
            # print(person_bboxes)

            # test a single image, with a list of bboxes.
            pose_results = inference_top_down_pose_model(pose_model, img,
                                                         person_bboxes,
                                                         bbox_thr=args.bbox_thr,
                                                         format='xyxy',
                                                         dataset=dataset)

            t = time.perf_counter()
            print('Frame {0} out of {3} analysed in {1} secs. '
                  'Total time: {2} secs'.format(frame, t - t1, t - t0, frames))

            # show the results
            if np.shape(pose_results)[0] > 0:
                prev_pose = pose_results
                # x_ratios = pose_results[0]['keypoints'][:, 0] / m_dim
                # y_ratios = pose_results[0]['keypoints'][:, 1] / m_dim
                ratios = pose_results[0]['keypoints'][:, 0:2] / m_dim

                lmin = min((ratios[13, 1], lmin))
                lmax = max((ratios[13, 1], lmax))
                # track the y-ratio of keypoint 14, mirroring the left-side
                # bookkeeping above
                rmin = min((ratios[14, 1], rmin))
                rmax = max((ratios[14, 1], rmax))
                # lmin[0] = min((ratios[13, 0], lmin[0]))
                # lmin[1] = min((ratios[13, 1], lmin[1]))
                # lmax[0] = max((ratios[13, 0], lmax[0]))
                # lmax[1] = max((ratios[13, 1], lmax[1]))
                #
                # rmin[0] = min((ratios[14, 0], rmin[0]))
                # rmin[1] = min((ratios[14, 1], rmin[1]))
                # rmax[0] = max((ratios[14, 0], rmax[0]))
                # rmax[1] = max((ratios[14, 1], rmax[1]))

                # NOTE: this condition had no body in the source snippet;
                # `pass` keeps the code syntactically valid.
                if (rmax - rmin) > 0.1 or (frame > 150 and
                                           (rmax - rmin) > (lmax - lmin)):
                    pass

                poses[frame, ...] = ratios
                # poses[frame, :, 0] = x_ratios
                # poses[frame, :, 1] = y_ratios
                # poses[frame, :, 0] = pose_results[0]['keypoints'][:, 0] / m_dim
                # poses[frame, :, 1] = pose_results[0]['keypoints'][:, 1] / m_dim

            else:
                pose_results = prev_pose  # or maybe just skip saving
                print('no pose detected in frame {0}'.format(frame))

        else:
            pose_results = prev_pose

        vis_img = vis_pose_result(pose_model, img, pose_results,
                                  dataset=dataset, kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show or frame % skip_ratio == 0:
            cv2.imshow('Image', vis_img)
        frame += 1

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
        out_file = fname.replace('.mp4', '.npy')
        np.save(out_file, poses)

    cv2.destroyAllWindows()


if __name__ == '__main__':
    print('starting...')
    main()
Example #15
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--img', type=str, default='', help='Image file')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding bbox score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    assert has_mmdet, 'Please install mmdet to run the demo.'

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')
    assert args.img != ''
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device.lower())
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']

    image_name = os.path.join(args.img_root, args.img)

    # test a single image, the resulting box is (x1, y1, x2, y2)
    mmdet_results = inference_detector(det_model, image_name)

    # keep the person class bounding boxes.
    person_results = process_mmdet_results(mmdet_results)

    # test a single image, with a list of bboxes.

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    pose_results, returned_outputs = inference_top_down_pose_model(
        pose_model,
        image_name,
        person_results,
        bbox_thr=args.bbox_thr,
        format='xyxy',
        dataset=dataset,
        return_heatmap=return_heatmap,
        outputs=output_layer_names)

    if args.out_img_root == '':
        out_file = None
    else:
        os.makedirs(args.out_img_root, exist_ok=True)
        out_file = os.path.join(args.out_img_root, f'vis_{args.img}')

    # show the results
    vis_pose_result(pose_model,
                    image_name,
                    pose_results,
                    dataset=dataset,
                    kpt_score_thr=args.kpt_thr,
                    show=args.show,
                    out_file=out_file)
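
process_mmdet_results keeps only the person-class boxes from the detector output; a sketch of what such a helper does (mmpose's actual implementation may differ in detail):

def process_mmdet_results(mmdet_results, cat_id=1):
    # Mask R-CNN style models return (bbox_results, segm_results)
    if isinstance(mmdet_results, tuple):
        mmdet_results = mmdet_results[0]
    bboxes = mmdet_results[cat_id - 1]  # cat_id is 1-based; person is 1
    return [{'bbox': bbox} for bbox in bboxes]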
Example #16
def main():
    """Visualize the demo images."""
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--json-file',
                        type=str,
                        default='',
                        help='Json file containing image info.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--pose-nms-thr',
                        type=float,
                        default=0.9,
                        help='OKS threshold for pose NMS')

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')

    coco = COCO(args.json_file)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    assert (dataset == 'BottomUpCocoDataset')

    img_keys = list(coco.imgs.keys())

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    # process each image
    for i in range(len(img_keys)):
        image_id = img_keys[i]
        image = coco.loadImgs(image_id)[0]
        image_name = os.path.join(args.img_root, image['file_name'])

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_bottom_up_pose_model(
            pose_model,
            image_name,
            pose_nms_thr=args.pose_nms_thr,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        # show the results
        vis_pose_result(pose_model,
                        image_name,
                        pose_results,
                        dataset=dataset,
                        kpt_score_thr=args.kpt_thr,
                        show=args.show,
                        out_file=out_file)
Example #17
def visualize(frames,
              annotations,
              pose_results,
              action_result,
              pose_model,
              plate=PLATEBLUE,
              max_num=5):
    """Visualize frames with predicted annotations.

    Args:
        frames (list[np.ndarray]): Frames for visualization, note that
            len(frames) % len(annotations) should be 0.
        annotations (list[list[tuple]]): The predicted spatio-temporal
            detection results.
        pose_results (list[list[tuple]]): The pose results.
        action_result (str): The predicted action recognition results.
        pose_model (nn.Module): The constructed pose model.
        plate (list[tuple]): The colour plate used for visualization.
            Default: PLATEBLUE.
        max_num (int): Max number of labels to visualize for a person box.
            Default: 5.

    Returns:
        list[np.ndarray]: Visualized frames.
    """

    assert max_num + 1 <= len(plate)
    plate = [x[::-1] for x in plate]
    frames_ = cp.deepcopy(frames)
    nf, na = len(frames), len(annotations)
    assert nf % na == 0
    nfpa = len(frames) // len(annotations)
    anno = None
    h, w, _ = frames[0].shape
    scale_ratio = np.array([w, h, w, h])

    # add pose results
    if pose_results:
        for i in range(nf):
            frames_[i] = vis_pose_result(pose_model, frames_[i],
                                         pose_results[i])

    for i in range(na):
        anno = annotations[i]
        if anno is None:
            continue
        for j in range(nfpa):
            ind = i * nfpa + j
            frame = frames_[ind]

            # add action result for whole video
            cv2.putText(frame, action_result, (10, 30), FONTFACE, FONTSCALE,
                        FONTCOLOR, THICKNESS, LINETYPE)

            # add spatio-temporal action detection results
            for ann in anno:
                box = ann[0]
                label = ann[1]
                if not len(label):
                    continue
                score = ann[2]
                box = (box * scale_ratio).astype(np.int64)
                st, ed = tuple(box[:2]), tuple(box[2:])
                if not pose_results:
                    cv2.rectangle(frame, st, ed, plate[0], 2)

                for k, lb in enumerate(label):
                    if k >= max_num:
                        break
                    text = abbrev(lb)
                    text = ': '.join([text, str(score[k])])
                    location = (0 + st[0], 18 + k * 18 + st[1])
                    textsize = cv2.getTextSize(text, FONTFACE, FONTSCALE,
                                               THICKNESS)[0]
                    textwidth = textsize[0]
                    diag0 = (location[0] + textwidth, location[1] - 14)
                    diag1 = (location[0], location[1] + 2)
                    cv2.rectangle(frame, diag0, diag1, plate[k + 1], -1)
                    cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
                                FONTCOLOR, THICKNESS, LINETYPE)

    return frames_
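
The `plate = [x[::-1] for x in plate]` line at the top of visualize flips each colour tuple because the plate is defined in RGB order while OpenCV draws in BGR; a tiny illustration (the RGB values here are made up):

plate_rgb = [(0, 0, 255), (0, 255, 255)]   # illustrative RGB colours
plate_bgr = [c[::-1] for c in plate_rgb]   # reverse each tuple for OpenCV
assert plate_bgr[0] == (255, 0, 0)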
Example #18
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cpu',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device)
    print('loaded detection model')
    # build the pose model from a config file and a checkpoint file
    print('pose config: {0} \npose checkpoint: {1}'.format(
        args.pose_config, args.pose_checkpoint))
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)
    print('loaded pose model')

    dataset = pose_model.cfg.data['test']['type']

    print(dataset)

    cap = cv2.VideoCapture(args.video_path)

    print('loaded video')

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True
        print('save path: {0}'.format(args.out_video_root))

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    count = 0
    t0 = time.perf_counter()
    while (cap.isOpened()):
        t1 = time.perf_counter()
        flag, img = cap.read()

        if not flag:
            break
        # test a single image, the resulting box is (x1, y1, x2, y2)
        det_results = inference_detector(det_model, img)
        # keep the person class bounding boxes.
        person_bboxes = det_results[0].copy()

        # test a single image, with a list of bboxes.
        pose_results = inference_top_down_pose_model(pose_model,
                                                     img,
                                                     person_bboxes,
                                                     bbox_thr=args.bbox_thr,
                                                     format='xyxy',
                                                     dataset=dataset)

        count += 1
        t = time.perf_counter()
        print('Frame {0} analysed in {1} secs. '
              'Total time: {2} secs'.format(count, t - t1, t - t0))

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show or count == 3:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()
Example #19
def main():
    """Visualize the demo images."""
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--json-file',
                        type=str,
                        default='',
                        help='Json file containing image info.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')

    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    coco = COCO(args.json_file)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    img_keys = list(coco.imgs.keys())

    # process each image
    for i in range(len(img_keys)):
        image_id = img_keys[i]
        image = coco.loadImgs(image_id)[0]
        image_name = os.path.join(args.img_root, image['file_name'])

        # test a single image, with a list of bboxes.
        pose_results = inference_bottom_up_pose_model(pose_model, image_name)

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        # show the results
        vis_pose_result(pose_model,
                        image_name,
                        pose_results,
                        skeleton=skeleton,
                        kpt_score_thr=args.kpt_thr,
                        show=args.show,
                        out_file=out_file)
Example #20
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    assert has_face_det, 'Please install face_recognition to run the demo. '\
                         '"pip install face_recognition", For more details, '\
                         'see https://github.com/ageitgey/face_recognition'

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
    else:
        dataset_info = DatasetInfo(dataset_info)

    cap = cv2.VideoCapture(args.video_path)
    assert cap.isOpened(), f'Failed to load video file {args.video_path}'

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    while (cap.isOpened()):
        flag, img = cap.read()
        if not flag:
            break

        face_det_results = face_recognition.face_locations(
            cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        face_results = process_face_det_results(face_det_results)

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            img,
            face_results,
            bbox_thr=None,
            format='xyxy',
            dataset=dataset,
            dataset_info=dataset_info,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  radius=args.radius,
                                  thickness=args.thickness,
                                  dataset=dataset,
                                  dataset_info=dataset_info,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if args.show and cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    if args.show:
        cv2.destroyAllWindows()
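
process_face_det_results adapts the face_recognition output to mmpose's bbox format; face_recognition.face_locations returns (top, right, bottom, left) tuples, so a plausible sketch of the helper is:

def process_face_det_results(face_det_results):
    # face_recognition gives (top, right, bottom, left);
    # mmpose expects dicts with 'bbox' in (x1, y1, x2, y2) order
    return [{'bbox': [left, top, right, bottom]}
            for (top, right, bottom, left) in face_det_results]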
Example #21
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--img', type=str, default='', help='Image file')
    parser.add_argument(
        '--show',
        action='store_true',
        default=False,
        help='whether to show img')
    parser.add_argument(
        '--out-img-root',
        type=str,
        default='',
        help='root of the output img file. '
        'Default not saving the visualization images.')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--det-cat-id',
        type=int,
        default=1,
        help='Category id for bounding box detection model')
    parser.add_argument(
        '--bbox-thr',
        type=float,
        default=0.3,
        help='Bounding box score threshold')
    parser.add_argument(
        '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
    parser.add_argument(
        '--radius',
        type=int,
        default=4,
        help='Keypoint radius for visualization')
    parser.add_argument(
        '--thickness',
        type=int,
        default=1,
        help='Link thickness for visualization')

    assert has_mmdet, 'Please install mmdet to run the demo.'

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')
    assert args.img != ''
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(
        args.det_config, args.det_checkpoint, device=args.device.lower())
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        args.pose_config, args.pose_checkpoint, device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
    else:
        dataset_info = DatasetInfo(dataset_info)

    image_name = os.path.join(args.img_root, args.img)

    # test a single image, the resulting box is (x1, y1, x2, y2)
    mmdet_results = inference_detector(det_model, image_name)

    # keep the person class bounding boxes.
    person_results = process_mmdet_results(mmdet_results, args.det_cat_id)

    # test a single image, with a list of bboxes.

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    pose_results, returned_outputs = inference_top_down_pose_model(
        pose_model,
        image_name,
        person_results,
        bbox_thr=args.bbox_thr,
        format='xyxy',
        dataset=dataset,
        dataset_info=dataset_info,
        return_heatmap=return_heatmap,
        outputs=output_layer_names)

    if args.out_img_root == '':
        out_file = None
    else:
        os.makedirs(args.out_img_root, exist_ok=True)
        out_file = os.path.join(args.out_img_root, f'vis_{args.img}')

    # show the results
    vis_pose_result(
        pose_model,
        image_name,
        pose_results,
        dataset=dataset,
        dataset_info=dataset_info,
        kpt_score_thr=args.kpt_thr,
        radius=args.radius,
        thickness=args.thickness,
        show=args.show,
        out_file=out_file)
Example #22
def main():
    """Visualize the demo images.
    Input image edge coordinates as bbox. 
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')
    assert 'cuda' in args.device

    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    img_name_list = []
    file_list = os.listdir(args.img_root)
    for file_name in sorted(file_list):
        if '.jpg' in file_name:
            img_name_list.append(file_name)
    save_list = []
    # process each image
    for i, img_name in enumerate(img_name_list):
        img_path = os.path.join(args.img_root, img_name)
        img = Image.open(img_path)
        width, height = img.size

        # make person bounding boxes: [x,y,width,height]
        person_bboxes = []
        person_bboxes.append([
            int(width * 5 / 110),
            int(height * 5 / 110),
            int(width * 100 / 110),
            int(height * 100 / 110)
        ])

        # test a single image, with a list of bboxes.
        pose_results = inference_top_down_pose_model(pose_model,
                                                     img_path,
                                                     person_bboxes,
                                                     format='xywh')
        print(len(pose_results[0]['keypoints'].tolist()))
        save_list.append(pose_results[0]['keypoints'].tolist())

        if args.out_img_root == '':
            out_file = None
        else:
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        # show the results
        vis_pose_result(pose_model,
                        img_path,
                        pose_results,
                        kpt_score_thr=args.kpt_thr,
                        show=args.show,
                        out_file=out_file)
    if args.out_img_root != '':
        os.makedirs(args.out_img_root, exist_ok=True)
    with open(os.path.join(args.out_img_root, 'results.json'), 'w') as f:
        json.dump(save_list, f, indent=2)
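The demo above feeds 'xywh' boxes to the pose API, while the detector-based demos use 'xyxy'. A quick sketch of the conversion between the two conventions (plain COCO-style box arithmetic, not part of the original snippet):

def xywh_to_xyxy(bbox):
    """Convert [x, y, width, height] to [x1, y1, x2, y2]."""
    x, y, w, h = bbox[:4]
    return [x, y, x + w, y + h]

def xyxy_to_xywh(bbox):
    """Convert [x1, y1, x2, y2] to [x, y, width, height]."""
    x1, y1, x2, y2 = bbox[:4]
    return [x1, y1, x2 - x1, y2 - y1]

# e.g. the border bbox built above, for a 1100x1100 image:
assert xywh_to_xyxy([50, 50, 1000, 1000]) == [50, 50, 1050, 1050]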
Example #23
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    assert args.show or (args.out_video_root != '')
    assert 'cuda' in args.device
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    cap = cv2.VideoCapture(args.video_path)

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    while cap.isOpened():
        flag, img = cap.read()
        if not flag:
            break
        # test a single image, the resulting box is (x1, y1, x2, y2)
        det_results = inference_detector(det_model, img)
        # keep the person class bounding boxes.
        person_bboxes = det_results[0].copy()

        # test a single image, with a list of bboxes.
        pose_results = inference_pose_model(pose_model,
                                            img,
                                            person_bboxes,
                                            bbox_thr=args.bbox_thr,
                                            format='xyxy')

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  skeleton=skeleton,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()
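A note on `det_results[0]` in the example above: for a COCO-trained detector such as Faster R-CNN, results come back as one array per class, index 0 being 'person', with rows of (x1, y1, x2, y2, score). A minimal sketch of pre-filtering by score, assuming that layout (the demo itself defers this to `bbox_thr`):

import numpy as np

def keep_person_bboxes(det_results, score_thr=0.3):
    """Keep person-class boxes above a score threshold (assumed layout)."""
    person = np.asarray(det_results[0])  # shape (N, 5): x1, y1, x2, y2, score
    return person[person[:, 4] > score_thr]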
Example #24
def main():
    """Visualize the demo images."""
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--img-path',
        type=str,
        help='Path to an image file or an image folder.')
    parser.add_argument(
        '--show',
        action='store_true',
        default=False,
        help='whether to show img')
    parser.add_argument(
        '--out-img-root',
        type=str,
        default='',
        help='Root of the output img file. '
        'Default not saving the visualization images.')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
    parser.add_argument(
        '--pose-nms-thr',
        type=float,
        default=0.9,
        help='OKS threshold for pose NMS')
    parser.add_argument(
        '--radius',
        type=int,
        default=4,
        help='Keypoint radius for visualization')
    parser.add_argument(
        '--thickness',
        type=int,
        default=1,
        help='Link thickness for visualization')

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')

    # prepare image list
    if osp.isfile(args.img_path):
        image_list = [args.img_path]
    elif osp.isdir(args.img_path):
        image_list = [
            osp.join(args.img_path, fn) for fn in os.listdir(args.img_path)
            if fn.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp'))
        ]
    else:
        raise ValueError('Image path should be an image or an image folder. '
                         f'Got invalid image path: {args.img_path}')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        args.pose_config, args.pose_checkpoint, device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
        assert (dataset == 'BottomUpCocoDataset')
    else:
        dataset_info = DatasetInfo(dataset_info)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    # process each image
    for image_name in mmcv.track_iter_progress(image_list):

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_bottom_up_pose_model(
            pose_model,
            image_name,
            dataset=dataset,
            dataset_info=dataset_info,
            pose_nms_thr=args.pose_nms_thr,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = os.path.join(
                args.out_img_root,
                f'vis_{osp.splitext(osp.basename(image_name))[0]}.jpg')

        # show the results
        vis_pose_result(
            pose_model,
            image_name,
            pose_results,
            radius=args.radius,
            thickness=args.thickness,
            dataset=dataset,
            dataset_info=dataset_info,
            kpt_score_thr=args.kpt_thr,
            show=args.show,
            out_file=out_file)
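`--pose-nms-thr` above is an OKS threshold for pose NMS. A minimal sketch of COCO-style object keypoint similarity, assuming per-keypoint `sigmas` like those carried by `dataset_info`:

import numpy as np

def oks(pred, gt, sigmas, area):
    """COCO-style object keypoint similarity between two (K, 3) keypoint
    arrays laid out as [x, y, score]."""
    d2 = np.sum((pred[:, :2] - gt[:, :2]) ** 2, axis=1)
    vars_ = (2.0 * np.asarray(sigmas)) ** 2
    valid = gt[:, 2] > 0  # only score keypoints marked visible/confident
    if not valid.any():
        return 0.0
    e = d2[valid] / (2.0 * vars_[valid] * (area + np.spacing(1)))
    return float(np.exp(-e).mean())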
Example #25
def main():
    args = parse_args()

    frame_paths, original_frames = frame_extraction(args.video,
                                                    args.short_side)
    num_frame = len(frame_paths)
    h, w, _ = original_frames[0].shape

    # Get clip_len, frame_interval and calculate center index of each clip
    config = mmcv.Config.fromfile(args.config)
    config.merge_from_dict(args.cfg_options)

    test_pipeline = Compose(config.data.test.pipeline)

    # Load label_map
    with open(args.label_map) as f:
        label_map = [line.strip() for line in f]

    # Get Human detection results
    det_results = detection_inference(args, frame_paths)
    torch.cuda.empty_cache()

    pose_results = pose_inference(args, frame_paths, det_results)
    torch.cuda.empty_cache()

    fake_anno = dict(frame_dir='',
                     label=-1,
                     img_shape=(h, w),
                     original_shape=(h, w),
                     start_index=0,
                     modality='Pose',
                     total_frames=num_frame)
    num_person = max([len(x) for x in pose_results])
    # Current PoseC3D models are trained on COCO-keypoints (17 keypoints)
    num_keypoint = 17
    keypoint = np.zeros((num_person, num_frame, num_keypoint, 2),
                        dtype=np.float16)
    keypoint_score = np.zeros((num_person, num_frame, num_keypoint),
                              dtype=np.float16)
    for i, poses in enumerate(pose_results):
        for j, pose in enumerate(poses):
            pose = pose['keypoints']
            keypoint[j, i] = pose[:, :2]
            keypoint_score[j, i] = pose[:, 2]
    fake_anno['keypoint'] = keypoint
    fake_anno['keypoint_score'] = keypoint_score

    imgs = test_pipeline(fake_anno)['imgs'][None]
    imgs = imgs.to(args.device)

    model = build_model(config.model)
    load_checkpoint(model, args.checkpoint, map_location=args.device)
    model.to(args.device)
    model.eval()

    with torch.no_grad():
        output = model(return_loss=False, imgs=imgs)

    action_idx = np.argmax(output)
    action_label = label_map[action_idx]

    pose_model = init_pose_model(args.pose_config, args.pose_checkpoint,
                                 args.device)
    vis_frames = [
        vis_pose_result(pose_model, frame_paths[i], pose_results[i])
        for i in range(num_frame)
    ]
    for frame in vis_frames:
        cv2.putText(frame, action_label, (10, 30), FONTFACE, FONTSCALE,
                    FONTCOLOR, THICKNESS, LINETYPE)

    cv2.imwrite('frame.jpg', vis_frames[0])
    vid = mpy.ImageSequenceClip([x[:, :, ::-1] for x in vis_frames], fps=24)
    vid.write_videofile(args.out_filename, remove_temp=True)

    tmp_frame_dir = osp.dirname(frame_paths[0])
    shutil.rmtree(tmp_frame_dir)
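The packing loop above assumes `pose_results[i]` is a list of per-person dicts whose 'keypoints' entry is a (17, 3) array of (x, y, score). A tiny sanity check of that layout with fabricated values:

import numpy as np

# two frames, one person, 17 COCO keypoints (random values, layout only)
num_person, num_frame, num_keypoint = 1, 2, 17
pose_results = [[{'keypoints': np.random.rand(num_keypoint, 3)}]
                for _ in range(num_frame)]

keypoint = np.zeros((num_person, num_frame, num_keypoint, 2), dtype=np.float16)
keypoint_score = np.zeros((num_person, num_frame, num_keypoint),
                          dtype=np.float16)
for i, poses in enumerate(pose_results):  # i indexes frames
    for j, pose in enumerate(poses):      # j indexes persons
        keypoint[j, i] = pose['keypoints'][:, :2]
        keypoint_score[j, i] = pose['keypoints'][:, 2]
assert keypoint.shape == (1, 2, 17, 2)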
Example #26
def loop(args, rotate, fname, person_bboxes, pose_model, flipped=False):

    cap = cv2.VideoCapture(args.video_path)

    fps = cap.get(cv2.CAP_PROP_FPS)
    frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    if rotate:
        size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
    else:
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    m_dim = max(size)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    videoWriter = cv2.VideoWriter(fname, fourcc, fps, size)
    poses = np.zeros((frames,
                      pose_model.cfg.channel_cfg['num_output_channels'], 2))
    dataset = pose_model.cfg.data['test']['type']

    skip_ratio = 1

    lmin = 1
    lmax = 0
    rmin = 1
    rmax = 0

    frame = 0
    t0 = time.perf_counter()
    prev_pose = 0
    while cap.isOpened():
        t1 = time.perf_counter()
        flag, img = cap.read()
        if not flag:
            break
        if rotate:
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
        if flipped:
            img = cv2.flip(img, 1)

        # run inference only on every skip_ratio-th frame
        if frame % skip_ratio == 0:
            # test a single image, with a list of bboxes.
            pose_results = inference_top_down_pose_model(pose_model, img,
                                                         person_bboxes,
                                                         bbox_thr=args.box_thr,
                                                         format='xyxy',
                                                         dataset=dataset)
            t = time.perf_counter()

            print(f'Frame {frame} out of {frames} analysed in '
                  f'{t - t1:.3f} secs. Total time: {t - t0:.3f} secs')

            # show the results
            if np.shape(pose_results)[0] > 0:
                prev_pose = pose_results

                ratios = pose_results[0]['keypoints'][:, 0:2] / m_dim

                lmin = min((ratios[13, 1], lmin))
                lmax = max((ratios[13, 1], lmax))
                rmin = min((ratios[14, 1], rmin))
                rmax = max((ratios[14, 1], rmax))

                if not flipped and ((rmax - rmin) > 0.1 or
                                    (frame > 150 and
                                     (rmax - rmin) > (lmax - lmin))):
                    # flipped = True
                    print('Left knee evaluated, restarting ' +
                          'with flipped images...')
                    cap.release()
                    videoWriter.release()
                    cv2.destroyAllWindows()
                    loop(args, rotate, fname, flip_box(person_bboxes, size[0]),
                         pose_model, True)
                    return

                poses[frame, ...] = ratios

            else:
                pose_results = prev_pose  # or maybe just skip saving
                print('No pose detected; reusing the previous result.')

        else:
            pose_results = prev_pose

        vis_img = vis_pose_result(pose_model, img, pose_results,
                                  dataset=dataset, kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show or frame % skip_ratio == 0:
            cv2.imshow('Image', vis_img)
        frame += 1

        # if save_out_video:
        videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    # if save_out_video:
    videoWriter.release()
    out_file = fname.replace('.mp4', '.npy')
    np.save(out_file, poses)

    cv2.destroyAllWindows()
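`flip_box` is not defined in this snippet. A plausible sketch, assuming it mirrors xyxy boxes about a frame of the given width (a hypothetical reconstruction, not the author's code):

def flip_box(bboxes, width):
    """Hypothetical helper: mirror xyxy bboxes horizontally within a
    frame of the given pixel width."""
    return [[width - x2, y1, width - x1, y2] for x1, y1, x2, y2 in bboxes]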
Example #27
def main():
    """Visualize the demo images."""
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--pose-nms-thr',
                        type=float,
                        default=0.9,
                        help='OKS threshold for pose NMS')
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
        assert (dataset == 'BottomUpCocoDataset')
    else:
        dataset_info = DatasetInfo(dataset_info)

    # read video
    video = mmcv.VideoReader(args.video_path)
    assert video.opened, f'Failed to load video file {args.video_path}'

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = video.fps
        size = (video.width, video.height)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    print('Running inference...')
    for cur_frame in mmcv.track_iter_progress(video):
        pose_results, _ = inference_bottom_up_pose_model(
            pose_model,
            cur_frame,
            dataset=dataset,
            dataset_info=dataset_info,
            pose_nms_thr=args.pose_nms_thr,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # show the results
        vis_frame = vis_pose_result(pose_model,
                                    cur_frame,
                                    pose_results,
                                    radius=args.radius,
                                    thickness=args.thickness,
                                    dataset=dataset,
                                    dataset_info=dataset_info,
                                    kpt_score_thr=args.kpt_thr,
                                    show=False)

        if args.show:
            cv2.imshow('Image', vis_frame)

        if save_out_video:
            videoWriter.write(vis_frame)

        if args.show and cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if save_out_video:
        videoWriter.release()
    if args.show:
        cv2.destroyAllWindows()
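Unlike the `cv2.VideoCapture` loops elsewhere on this page, `mmcv.VideoReader` exposes metadata as attributes and yields frames directly; a minimal usage sketch (the path is a placeholder):

import mmcv

video = mmcv.VideoReader('demo.mp4')  # placeholder path
assert video.opened
print(video.fps, video.width, video.height, len(video))
for frame in video:  # each frame is a BGR ndarray, as with cv2
    pass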
Example #28
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    print(args)
    assert args.show or (args.out_video_root != '')
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    print('dataset', dataset)
    cap = cv2.VideoCapture(args.video_path)
    assert cap.isOpened(), f'Failed to load video file {args.video_path}'
    print('cap', cap)
    fps = cap.get(cv2.CAP_PROP_FPS)
    print('fps', fps)
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    print('frame size', size)
    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    pose_res_json = {'result': []}
    pose_res_list = []
    img_idx = 0
    while cap.isOpened():
        flag, img = cap.read()
        if not flag:
            break
        # current frame image pose result
        temp_img_pose = {'id': img_idx, 'keypoints': []}
        # keep the person class bounding boxes.
        person_results = [{'bbox': np.array([0, 0, size[0], size[1]])}]

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            img,
            person_results,
            format='xyxy',
            dataset=dataset,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)
        # record this frame's keypoints ('id' was already set above)
        temp_img_pose['keypoints'] = pose_results[0]['keypoints'].tolist()
        if img_idx % 100 == 0:
            print(f'completed {img_idx} frames.')
        img_idx += 1

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)
        # save current pose result
        pose_res_list.append(temp_img_pose)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()
    # update pose result list
    print('frames count: ', len(pose_res_list))
    pose_res_json['result'] = pose_res_list
    # save json data
    with open('./video_leg_front.json', 'w') as json_file:
        json.dump(pose_res_json, json_file)
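The file written above is plain JSON and can be read back directly; a quick sketch of its structure, assuming the demo ran over at least one frame:

import json

with open('video_leg_front.json') as f:
    data = json.load(f)
# data['result'] is a list of {'id': frame_index,
#                              'keypoints': [[x, y, score], ...]} dicts
print(len(data['result']), 'frames,',
      len(data['result'][0]['keypoints']), 'keypoints in frame 0')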
Example #29
def main():
    """Visualize the demo images.

    Require the json_file containing boxes.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--json-file',
                        type=str,
                        default='',
                        help='Json file containing image info.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=None,
                        help='Bounding box score threshold. The ground-truth '
                        'boxes from the json file carry no scores, so the '
                        'default keeps them all.')

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')

    coco = COCO(args.json_file)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    dataset = pose_model.cfg.data['test']['type']

    img_keys = list(coco.imgs.keys())

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    # process each image
    for i in range(len(img_keys)):
        # get bounding box annotations
        image_id = img_keys[i]
        image = coco.loadImgs(image_id)[0]
        image_name = os.path.join(args.img_root, image['file_name'])
        ann_ids = coco.getAnnIds(image_id)

        # make person bounding boxes
        person_bboxes = []
        for ann_id in ann_ids:
            ann = coco.anns[ann_id]
            # bbox format is 'xywh'
            bbox = ann['bbox']
            person_bboxes.append(bbox)

        # test a single image, with a list of bboxes
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            image_name,
            person_bboxes,
            bbox_thr=args.bbox_thr,
            format='xywh',
            dataset=dataset,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        vis_pose_result(pose_model,
                        image_name,
                        pose_results,
                        dataset=dataset,
                        kpt_score_thr=args.kpt_thr,
                        show=args.show,
                        out_file=out_file)
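`coco.getAnnIds(image_id)` above returns annotations of every category. If the json file follows standard COCO category ids, the query can be restricted to persons explicitly; a hedged sketch with a placeholder path:

from pycocotools.coco import COCO

coco = COCO('annotations.json')  # placeholder path
image_id = list(coco.imgs.keys())[0]
person_cat = coco.getCatIds(catNms=['person'])  # [1] in standard COCO
ann_ids = coco.getAnnIds(imgIds=image_id, catIds=person_cat)
person_bboxes = [coco.anns[ann_id]['bbox'] for ann_id in ann_ids]  # xywh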
Example #30
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--img', type=str, default='', help='Image file')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    assert args.show or (args.out_img_root != '')
    assert args.img != ''
    assert 'cuda' in args.device
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    image_name = os.path.join(args.img_root, args.img)

    # test a single image, the resulting box is (x1, y1, x2, y2)
    det_results = inference_detector(det_model, image_name)

    # keep the person class bounding boxes. (FasterRCNN)
    person_bboxes = det_results[0].copy()

    # test a single image, with a list of bboxes.
    pose_results = inference_pose_model(pose_model,
                                        image_name,
                                        person_bboxes,
                                        bbox_thr=args.bbox_thr,
                                        format='xyxy')

    if args.out_img_root == '':
        out_file = None
    else:
        out_file = os.path.join(args.out_img_root, f'vis_{args.img}')

    # show the results
    vis_pose_result(pose_model,
                    image_name,
                    pose_results,
                    skeleton=skeleton,
                    kpt_score_thr=args.kpt_thr,
                    show=args.show,
                    out_file=out_file)
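For reference, the `skeleton` lists in these demos use 1-based COCO keypoint indices; a short sketch that spells each link out by name (the keypoint order is the standard COCO one):

COCO_KEYPOINTS = [
    'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',
    'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
    'left_wrist', 'right_wrist', 'left_hip', 'right_hip',
    'left_knee', 'right_knee', 'left_ankle', 'right_ankle'
]

skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
            [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
            [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

for a, b in skeleton:
    # subtract 1 to go from the 1-based indices to list positions
    print(COCO_KEYPOINTS[a - 1], '->', COCO_KEYPOINTS[b - 1])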