Example #1
def test_top_down_demo():
    # COCO demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/top_down/resnet/coco/res50_coco_256x192.py',
        None,
        device='cpu')
    image_name = 'tests/data/coco/000000000785.jpg'

    person_result = []
    person_result.append({'bbox': [50, 50, 50, 100]})
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model, image_name, person_result, format='xywh')
    # show the results
    vis_pose_result(pose_model, image_name, pose_results)

    # AIC demo
    pose_model = init_pose_model(
        'configs/top_down/resnet/aic/res50_aic_256x192.py', None, device='cpu')
    image_name = 'tests/data/aic/054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name,
        person_result,
        format='xywh',
        dataset='TopDownAicDataset')
    # show the results
    vis_pose_result(
        pose_model, image_name, pose_results, dataset='TopDownAicDataset')

    # OneHand10K demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/hand/resnet/onehand10k/res50_onehand10k_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/onehand10k/9.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name,
        person_result,
        format='xywh',
        dataset='OneHand10KDataset')
    # show the results
    vis_pose_result(
        pose_model, image_name, pose_results, dataset='OneHand10KDataset')

    with pytest.raises(NotImplementedError):
        pose_results, _ = inference_top_down_pose_model(
            pose_model,
            image_name,
            person_result,
            format='xywh',
            dataset='test')
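
The format argument tells inference_top_down_pose_model how to interpret the boxes ('xywh' here, 'xyxy' in later examples). As a reference, a minimal sketch of the conversion between the two conventions (these helpers are illustrative, not mmpose API, though a later example imports mmpose's private _xywh2xyxy):

def xywh2xyxy(bbox):
    # (left, top, width, height) -> (x1, y1, x2, y2)
    x, y, w, h = bbox
    return [x, y, x + w, y + h]

def xyxy2xywh(bbox):
    # (x1, y1, x2, y2) -> (left, top, width, height)
    x1, y1, x2, y2 = bbox
    return [x1, y1, x2 - x1, y2 - y1]

assert xywh2xyxy([50, 50, 50, 100]) == [50, 50, 100, 150]
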
Example #2
    def update(self, img):
        # test a single image, the resulting box is (x1, y1, x2, y2)
        mmdet_results = inference_detector(self.det_model, img)
        # keep the person class bounding boxes.
        person_bboxes = self.process_mmdet_results(mmdet_results)
        # test a single image, with a list of bboxes.
        self.last_pose_results, self.last_returned_outputs = inference_top_down_pose_model(
            self.pose_model,
            img,
            person_bboxes,
            bbox_thr=self.bbox_thr,
            format='xyxy',
            dataset=self.dataset,
            return_heatmap=self.return_heatmap,
            outputs=self.output_layer_names)

        population = len(self.last_pose_results)
        self.last_raw_results = []
        self.last_converted_results = []
        self.last_scores = []
        for index in range(population):
            keypoint = self.last_pose_results[index]['keypoints']
            rawret = []
            scores = []
            minpos = [sys.float_info.max, sys.float_info.max]
            maxpos = [-sys.float_info.max, -sys.float_info.max]
            for cl in self.coco_to_sem:
                rawpt = [0.0, 0.0]
                score = 0
                for c in cl:
                    rawpt[0] = rawpt[0] + keypoint[c][0]
                    rawpt[1] = rawpt[1] + keypoint[c][1]
                    score = score + keypoint[c][2]
                cn = len(cl)
                rawpt[0] = rawpt[0] / cn
                rawpt[1] = rawpt[1] / cn
                score = score / cn
                rawret.append(rawpt)
                scores.append(score)
                minpos[0] = min(rawpt[0], minpos[0])
                minpos[1] = min(rawpt[1], minpos[1])
                maxpos[0] = max(rawpt[0], maxpos[0])
                maxpos[1] = max(rawpt[1], maxpos[1])

            cx = (maxpos[0] + minpos[0]) / 2
            cy = (maxpos[1] + minpos[1]) / 2
            width = (maxpos[0] - minpos[0]) / 2
            height = (maxpos[1] - minpos[1]) / 2
            scale = max(width, height)
            result = []
            for rawpt in rawret:
                point = [0.0, 0.0]
                point[0] = (rawpt[0] - cx) / scale
                point[1] = (rawpt[1] - cy) / scale
                result.append(point)

            result = torch.tensor(result).float().to(self.device.device)
            self.last_raw_results.append(rawret)
            self.last_converted_results.append(result)
            self.last_scores.append(scores)
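
The loop above averages COCO keypoints into semantic groups, then normalizes the group centers into a square roughly spanning [-1, 1]. The same computation as a standalone NumPy sketch (coco_to_sem is whatever index grouping the class was configured with; the grouping below is made up for illustration):

import numpy as np

def normalize_keypoints(keypoints, groups):
    # keypoints: (K, 3) array of (x, y, score); groups: lists of indices
    pts = np.stack([keypoints[g, :2].mean(axis=0) for g in groups])
    scores = np.array([keypoints[g, 2].mean() for g in groups])
    center = (pts.max(axis=0) + pts.min(axis=0)) / 2
    scale = ((pts.max(axis=0) - pts.min(axis=0)) / 2).max()
    return (pts - center) / scale, scores

keypoints = np.random.rand(17, 3)  # dummy COCO-style keypoints
groups = [[5, 6], [11, 12], [0]]   # illustrative grouping only
points, scores = normalize_keypoints(keypoints, groups)
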
Example #3
def main():
    """Visualize the demo images.
    Use a bounding box slightly inset from the image edges.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()
    assert 'cuda' in args.device

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    for scene in SCENE_NAMES:
        print('Processing scene: ', scene)
        scene_root = os.path.join(ROOT_DIR, scene)
        with open(os.path.join(scene_root, scene + '.json'), 'r') as load_f:
            batch_labels = json.load(load_f)
        save_dict = {}
        for pid in batch_labels.keys():
            if batch_labels[pid]:
                print('Processing scene: {} person: {}'.format(scene, pid))
                save_dict[pid] = []
                for batch in batch_labels[pid]:
                    buffer = []
                    images = batch['images']
                    # process each image
                    for img_name in images:
                        img_path = os.path.join(scene_root, pid, img_name)
                        img = Image.open(img_path)
                        width, height = img.size

                        # make person bounding boxes: [x,y,width,height]
                        person_bboxes = [[
                            int(width * 5 / 110),
                            int(height * 5 / 110),
                            int(width * 100 / 110),
                            int(height * 100 / 110)
                        ]]

                        # pose estimate on a single image.
                        pose_results = inference_top_down_pose_model(
                            pose_model, img_path, person_bboxes, format='xywh')
                        buffer.append(pose_results[0]['keypoints'].tolist())
                    save_dict[pid].append(buffer)
        json_string = json.dumps(save_dict, indent=2)
        with open(os.path.join(scene_root, scene + '_skeletons.json'),
                  "w") as f:
            f.write(json_string)
        break  # only process the first scene
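
The hard-coded 5/110 and 100/110 factors above amount to an xywh box inset by a small margin from each image border. A hedged generalization (helper name and default are my own, not from the source):

def full_image_bbox(width, height, margin=5 / 110):
    # xywh box leaving roughly `margin` of the image on every side,
    # mirroring the 5/110 and 100/110 factors used above
    return [int(width * margin), int(height * margin),
            int(width * (1 - 2 * margin)), int(height * (1 - 2 * margin))]

person_bboxes = [full_image_bbox(640, 480)]
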
Example #4
def main(args):
    os.makedirs(args.out_dir, exist_ok=True)

    # Inference single image by native apis.
    model = init_pose_model(args.config, args.checkpoint, device=args.device)
    if isinstance(model, TopDown):
        pytorch_result, _ = inference_top_down_pose_model(model,
                                                          args.img,
                                                          person_results=None)
    elif isinstance(model, (AssociativeEmbedding, )):
        pytorch_result, _ = inference_bottom_up_pose_model(model, args.img)
    else:
        raise NotImplementedError()

    vis_pose_result(model,
                    args.img,
                    pytorch_result,
                    out_file=osp.join(args.out_dir, 'pytorch_result.png'))

    # Inference single image by torchserve engine.
    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.img, 'rb') as image:
        response = requests.post(url, image)
    server_result = response.json()

    vis_pose_result(model,
                    args.img,
                    server_result,
                    out_file=osp.join(args.out_dir, 'torchserve_result.png'))
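
Beyond the two side-by-side visualizations, the PyTorch and TorchServe outputs can also be compared numerically. A hedged sketch, assuming both results are lists of dicts carrying a 'keypoints' array as inference_top_down_pose_model returns:

import numpy as np

def results_close(res_a, res_b, atol=1e-4):
    # illustrative check that two result lists agree within a tolerance
    if len(res_a) != len(res_b):
        return False
    return all(
        np.allclose(np.asarray(a['keypoints']),
                    np.asarray(b['keypoints']), atol=atol)
        for a, b in zip(res_a, res_b))

sample = [{'keypoints': [[1.0, 2.0, 0.9]]}]
assert results_close(sample, sample)
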
Example #5
    def _inference_top_down_pose_model(self, data):
        results = []
        for image in data:
            # use dummy person bounding box
            preds, _ = inference_top_down_pose_model(
                self.model, image, person_results=None)
            results.append(preds)
        return results
Example #6
def inference(detector,
              model,
              img,
              vis=False,
              bbox_thr=0.3,
              kpt_thr=0.3,
              dataset='TopDownCocoDataset',
              format='xyxy',
              return_heatmap=False,
              **kwargs):
    import torch as th
    from ml import cv
    from ml.vision.ops import dets_select
    # from xtcocotools.coco import COCO
    from mmpose.apis import (inference_top_down_pose_model, vis_pose_result)
    from mmpose.datasets import DatasetInfo

    model.to('cuda:0')
    model.eval()
    # result = model(return_loss=return_loss, **data)

    fp16 = kwargs.get('fp16', False)
    with th.cuda.amp.autocast(enabled=fp16):
        dets = detector.detect(img, size=640, conf_thres=0.4, iou_thres=0.5)
    persons = dets_select(dets, [0])
    ppls = [
        dets_f[persons_f].cpu().numpy()
        for dets_f, persons_f in zip(dets, persons)
    ]
    """
    Args:
        person_results(List[Tensor(N, 5)]): bboxes per class in order with scores
    """
    # print(ppls)
    person_results = [dict(bbox=ppl[:-1]) for ppl in ppls[0]]
    # print(person_results)
    pose_results, returned_outputs = inference_top_down_pose_model(
        model,
        img,
        person_results,
        bbox_thr=bbox_thr,
        format=format,
        dataset=dataset,
        # dataset_info=DatasetInfo({'dataset_name': dataset, 'flip_pairs': []}),
        return_heatmap=return_heatmap,
        outputs=None)
    if vis:
        img = cv.imread(img)
        vis_img = vis_pose_result(model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=kpt_thr,
                                  show=False)
        return pose_results, vis_img
    return pose_results
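
dets_select comes from a project-specific library (ml.vision.ops) and keeps detections of the requested classes, class 0 being 'person' in COCO ordering. A plausible single-frame stand-in with plain torch (an assumption, not the library's implementation):

import torch

def select_class(dets, class_ids):
    # dets: (N, 6) rows of (x1, y1, x2, y2, score, class) -> boolean mask
    return torch.stack([dets[:, -1] == c for c in class_ids]).any(dim=0)

dets = torch.tensor([[0., 0., 10., 20., 0.9, 0.],
                     [5., 5., 15., 25., 0.8, 2.]])
persons = dets[select_class(dets, [0])]  # keeps only the class-0 detection
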
Example #7
def inference_pose():
    print('Thread "pose" started')
    stop_watch = StopWatch(window=10)

    while True:
        while len(det_result_queue) < 1:
            time.sleep(0.001)
        with det_result_queue_mutex:
            ts_input, frame, t_info, mmdet_results = det_result_queue.popleft()

        pose_results_list = []
        for model_info, pose_history in zip(pose_model_list,
                                            pose_history_list):
            model_name = model_info['name']
            pose_model = model_info['model']
            cat_ids = model_info['cat_ids']
            pose_results_last = pose_history['pose_results_last']
            next_id = pose_history['next_id']

            with stop_watch.timeit(model_name):
                # process mmdet results
                det_results = process_mmdet_results(
                    mmdet_results,
                    class_names=det_model.CLASSES,
                    cat_ids=cat_ids)

                # inference pose model
                dataset_name = pose_model.cfg.data['test']['type']
                pose_results, _ = inference_top_down_pose_model(
                    pose_model,
                    frame,
                    det_results,
                    bbox_thr=args.det_score_thr,
                    format='xyxy',
                    dataset=dataset_name)

                pose_results, next_id = get_track_id(pose_results,
                                                     pose_results_last,
                                                     next_id,
                                                     use_oks=False,
                                                     tracking_thr=0.3,
                                                     use_one_euro=True,
                                                     fps=None)

                pose_results_list.append(pose_results)

                # update pose history
                pose_history['pose_results_last'] = pose_results
                pose_history['next_id'] = next_id

        t_info += stop_watch.report_strings()
        with pose_result_queue_mutex:
            pose_result_queue.append((ts_input, t_info, pose_results_list))

        event_inference_done.set()
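
inference_pose() relies on module-level state created elsewhere in the webcam demo (pose_model_list, pose_history_list, det_model, args, StopWatch, and the queues). A minimal sketch of the queue and synchronization scaffolding it assumes (maxlen=1 is a guess that keeps only the freshest frame):

import threading
from collections import deque

# detector thread -> det_result_queue -> pose thread -> pose_result_queue
det_result_queue = deque(maxlen=1)
det_result_queue_mutex = threading.Lock()
pose_result_queue = deque(maxlen=1)
pose_result_queue_mutex = threading.Lock()
event_inference_done = threading.Event()
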
Example #8
def pose_inference(args, frame_paths, det_results):
    model = init_pose_model(args.pose_config, args.pose_checkpoint,
                            args.device)
    ret = []
    print('Performing Human Pose Estimation for each frame')
    prog_bar = mmcv.ProgressBar(len(frame_paths))
    for f, d in zip(frame_paths, det_results):
        # Align input format
        d = [dict(bbox=x) for x in list(d)]
        pose = inference_top_down_pose_model(model, f, d, format='xyxy')[0]
        ret.append(pose)
        prog_bar.update()
    return ret
Example #9
def pose_inference(args, frame_paths, det_results):
    model = init_pose_model(args.pose_config, args.pose_checkpoint,
                            args.device)
    print('Performing Human Pose Estimation for each frame')
    prog_bar = mmcv.ProgressBar(len(frame_paths))

    num_frame, num_person = det_results.shape[:2]
    kp = np.zeros((num_person, num_frame, 17, 3), dtype=np.float32)

    for i, (f, d) in enumerate(zip(frame_paths, det_results)):
        # Align input format
        d = [dict(bbox=x) for x in list(d) if x[-1] > 0.5]
        pose = inference_top_down_pose_model(model, f, d, format='xyxy')[0]
        for j, item in enumerate(pose):
            kp[j, i] = item['keypoints']
        prog_bar.update()
    return kp
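
The first variant accepts per-frame lists of (x1, y1, x2, y2, score) boxes, while the second indexes det_results as a (num_frame, num_person, 5) array and drops detections scoring 0.5 or less. A tiny stub of such an array, for two frames and one person:

import numpy as np

det_results = np.array([[[10., 10., 90., 200., 0.98]],
                        [[12., 11., 91., 203., 0.97]]], dtype=np.float32)
assert det_results.shape == (2, 1, 5)
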
Example #10
def test_top_down_demo():
    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/top_down/resnet/coco/res50_coco_256x192.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'
    # test a single image, with a list of bboxes.
    pose_results = inference_top_down_pose_model(
        pose_model, image_name, [[50, 50, 50, 100]], format='xywh')

    # show the results
    vis_pose_result(pose_model, image_name, pose_results, skeleton=skeleton)
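
The skeleton passed to vis_pose_result uses 1-based COCO keypoint indices. A consumer that indexes the (K, 3) keypoints array directly may want a 0-based copy; a trivial sketch:

skeleton = [[16, 14], [14, 12], [17, 15], [15, 13]]  # first few COCO links
skeleton_0based = [[a - 1, b - 1] for a, b in skeleton]
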
Example #11
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)

    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    model = init_pose_model(config=cfg,
                            checkpoint=args.checkpoint,
                            device=device)
    img_path = args.img_path

    if not os.path.isfile(img_path):
        raise ValueError("--img-path value is not a valid file path")
    elif img_path.split('.')[-1].lower() not in VALID_IMG_TYPES:
        raise ValueError(
            f"--img-path value is not a valid file type. \n Valid file types are {VALID_IMG_TYPES}"
        )

    output = inference_top_down_pose_model(model, img_path)
Example #12
def det_posestim(det_model, img, pose_model, args, dataset):
    det_results = inference_detector(det_model, img)

    person_bboxes = det_results[0].copy()

    pose_results = inference_top_down_pose_model(pose_model,
                                                 img,
                                                 person_bboxes,
                                                 bbox_thr=args.bbox_thr,
                                                 format='xyxy',
                                                 dataset=dataset)

    vis_img = vis_pose_result(pose_model,
                              img,
                              pose_results,
                              dataset=dataset,
                              kpt_score_thr=args.kpt_thr,
                              show=False)

    return vis_img, pose_results
Example #13
    def process(self, input_msgs):
        input_msg = input_msgs['input']
        img = input_msg.get_image()

        if self.det_countdown == 0:
            # get objects by detection model
            self.det_countdown = self.det_interval
            preds = inference_detector(self.det_model, img)
            objects_det = self._post_process_det(preds)
        else:
            # get object by pose tracking
            objects_det = self._get_objects_by_tracking(img.shape)

        self.det_countdown -= 1

        objects_pose, _ = inference_top_down_pose_model(self.pose_model,
                                                        img,
                                                        objects_det,
                                                        bbox_thr=self.bbox_thr,
                                                        format='xyxy')

        objects, next_id = get_track_id(objects_pose,
                                        self.track_info.last_objects,
                                        self.track_info.next_id,
                                        use_oks=False,
                                        tracking_thr=0.3)

        self.track_info.next_id = next_id
        self.track_info.last_objects = objects.copy()

        # Pose smoothing
        if self.smoother:
            objects = self.smoother.smooth(objects)

        for obj in objects:
            obj['det_model_cfg'] = self.det_model.cfg
            obj['pose_model_cfg'] = self.pose_model.cfg

        input_msg.update_objects(objects)

        return input_msg
Example #14
    def process(self, input_msgs):

        input_msg = input_msgs['input']
        img = input_msg.get_image()

        if self.class_ids:
            objects = input_msg.get_objects(
                lambda x: x.get('class_id') in self.class_ids)
        elif self.labels:
            objects = input_msg.get_objects(
                lambda x: x.get('label') in self.labels)
        else:
            objects = input_msg.get_objects()
        # Inference pose
        objects, _ = inference_top_down_pose_model(
            self.model, img, objects, bbox_thr=self.bbox_thr, format='xyxy')

        # Pose tracking
        objects, next_id = get_track_id(
            objects,
            self.track_info.last_objects,
            self.track_info.next_id,
            use_oks=False,
            tracking_thr=0.3)

        self.track_info.next_id = next_id
        # Copy the prediction to avoid track_info being affected by smoothing
        self.track_info.last_objects = [obj.copy() for obj in objects]

        # Pose smoothing
        if self.smoother:
            objects = self.smoother.smooth(objects)

        for obj in objects:
            obj['pose_model_cfg'] = self.model.cfg
        input_msg.update_objects(objects)

        return input_msg
Example #15
def test_top_down_pose_tracking_demo():
    # COCO demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/'
        'coco/res50_coco_256x192.py',
        None,
        device='cpu')
    image_name = 'tests/data/coco/000000000785.jpg'
    dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])
    person_result = [{'bbox': [50, 50, 50, 100]}]

    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(pose_model,
                                                    image_name,
                                                    person_result,
                                                    format='xywh',
                                                    dataset_info=dataset_info)
    pose_results, next_id = get_track_id(pose_results, [], next_id=0)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset_info=dataset_info)
    pose_results_last = pose_results

    # AIC demo
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/'
        'aic/res50_aic_256x192.py',
        None,
        device='cpu')
    image_name = 'tests/data/aic/054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg'
    dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(pose_model,
                                                    image_name,
                                                    person_result,
                                                    format='xywh',
                                                    dataset_info=dataset_info)
    pose_results, next_id = get_track_id(pose_results, pose_results_last,
                                         next_id)
    for pose_result in pose_results:
        del pose_result['bbox']
    pose_results, next_id = get_track_id(pose_results, pose_results_last,
                                         next_id)

    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset_info=dataset_info)

    # OneHand10K demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/hand/2d_kpt_sview_rgb_img/topdown_heatmap/'
        'onehand10k/res50_onehand10k_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/onehand10k/9.jpg'
    dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name, [{
            'bbox': [10, 10, 30, 30]
        }],
        format='xywh',
        dataset_info=dataset_info)
    pose_results, next_id = get_track_id(pose_results, pose_results_last,
                                         next_id)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset_info=dataset_info)

    # InterHand2D demo
    pose_model = init_pose_model(
        'configs/hand/2d_kpt_sview_rgb_img/topdown_heatmap/'
        'interhand2d/res50_interhand2d_all_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/interhand2.6m/image2017.jpg'
    dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(pose_model,
                                                    image_name, [{
                                                        'bbox': [50, 50, 0, 0]
                                                    }],
                                                    format='xywh',
                                                    dataset_info=dataset_info)
    pose_results, next_id = get_track_id(pose_results, [], next_id=0)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset_info=dataset_info)
    pose_results_last = pose_results

    # MPII demo
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/'
        'mpii/res50_mpii_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/mpii/004645041.jpg'
    dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(pose_model,
                                                    image_name, [{
                                                        'bbox': [50, 50, 0, 0]
                                                    }],
                                                    format='xywh',
                                                    dataset_info=dataset_info)
    pose_results, next_id = get_track_id(pose_results, pose_results_last,
                                         next_id)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset_info=dataset_info)
Example #16
def main():
    parser = ArgumentParser()
    parser.add_argument('pose_lifter_config',
                        help='Config file for the 2nd stage pose lifter model')
    parser.add_argument(
        'pose_lifter_checkpoint',
        help='Checkpoint file for the 2nd stage pose lifter model')
    parser.add_argument('--pose-detector-config',
                        type=str,
                        default=None,
                        help='Config file for the 1st stage 2D pose detector')
    parser.add_argument(
        '--pose-detector-checkpoint',
        type=str,
        default=None,
        help='Checkpoint file for the 1st stage 2D pose detector')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument(
        '--json-file',
        type=str,
        default=None,
        help='Json file containing image and bbox information. Optionally, '
        'the Json file can also contain 2D pose information. See '
        '"only-second-stage".')
    parser.add_argument(
        '--camera-param-file',
        type=str,
        default=None,
        help='Camera parameter file for converting 3D pose predictions from '
        'the camera space to the world space. If None, no conversion will be '
        'applied.')
    parser.add_argument(
        '--only-second-stage',
        action='store_true',
        help='If true, load 2D pose detection result from the Json file and '
        'skip the 1st stage. The pose detection model will be ignored.')
    parser.add_argument(
        '--rebase-keypoint-height',
        action='store_true',
        help='Rebase the predicted 3D pose so its lowest keypoint has a '
        'height of 0 (landing on the ground). This is useful for '
        'visualization when the model does not predict the global position '
        'of the 3D pose.')
    parser.add_argument(
        '--show-ground-truth',
        action='store_true',
        help='If True, show ground truth if it is available. The ground truth '
        'should be contained in the annotations in the Json file with the key '
        '"keypoints_3d" for each instance.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default=None,
                        help='Root of the output visualization images. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device for inference')
    parser.add_argument('--kpt-thr', type=float, default=0.3)
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    args = parser.parse_args()
    assert args.show or (args.out_img_root is not None)

    coco = COCO(args.json_file)

    # First stage: 2D pose detection
    pose_det_results_list = []
    if args.only_second_stage:
        from mmpose.apis.inference import _xywh2xyxy

        print('Stage 1: load 2D pose results from Json file.')
        for image_id, image in coco.imgs.items():
            image_name = osp.join(args.img_root, image['file_name'])
            ann_ids = coco.getAnnIds(image_id)
            pose_det_results = []
            for ann_id in ann_ids:
                ann = coco.anns[ann_id]
                keypoints = np.array(ann['keypoints']).reshape(-1, 3)
                keypoints[..., 2] = keypoints[..., 2] >= 1
                keypoints_3d = np.array(ann['keypoints_3d']).reshape(-1, 4)
                keypoints_3d[..., 3] = keypoints_3d[..., 3] >= 1
                bbox = np.array(ann['bbox']).reshape(1, -1)

                pose_det_result = {
                    'image_name': image_name,
                    'bbox': _xywh2xyxy(bbox),
                    'keypoints': keypoints,
                    'keypoints_3d': keypoints_3d
                }
                pose_det_results.append(pose_det_result)
            pose_det_results_list.append(pose_det_results)

    else:
        print('Stage 1: 2D pose detection.')

        pose_det_model = init_pose_model(args.pose_detector_config,
                                         args.pose_detector_checkpoint,
                                         device=args.device.lower())

        assert pose_det_model.cfg.model.type == 'TopDown', 'Only "TopDown" ' \
            'model is supported for the 1st stage (2D pose detection)'

        dataset = pose_det_model.cfg.data['test']['type']
        img_keys = list(coco.imgs.keys())

        for i in mmcv.track_iter_progress(range(len(img_keys))):
            # get bounding box annotations
            image_id = img_keys[i]
            image = coco.loadImgs(image_id)[0]
            image_name = osp.join(args.img_root, image['file_name'])
            ann_ids = coco.getAnnIds(image_id)

            # make person results for single image
            person_results = []
            for ann_id in ann_ids:
                person = {}
                ann = coco.anns[ann_id]
                person['bbox'] = ann['bbox']
                person_results.append(person)

            pose_det_results, _ = inference_top_down_pose_model(
                pose_det_model,
                image_name,
                person_results,
                bbox_thr=None,
                format='xywh',
                dataset=dataset,
                return_heatmap=False,
                outputs=None)

            for res in pose_det_results:
                res['image_name'] = image_name
            pose_det_results_list.append(pose_det_results)

    # Second stage: Pose lifting
    print('Stage 2: 2D-to-3D pose lifting.')

    pose_lift_model = init_pose_model(args.pose_lifter_config,
                                      args.pose_lifter_checkpoint,
                                      device=args.device.lower())

    assert pose_lift_model.cfg.model.type == 'PoseLifter', 'Only ' \
        '"PoseLifter" model is supported for the 2nd stage ' \
        '(2D-to-3D lifting)'
    dataset = pose_lift_model.cfg.data['test']['type']

    camera_params = None
    if args.camera_param_file is not None:
        camera_params = mmcv.load(args.camera_param_file)

    for i, pose_det_results in enumerate(
            mmcv.track_iter_progress(pose_det_results_list)):
        # 2D-to-3D pose lifting
        # Note that the pose_det_results are regarded as a single-frame pose
        # sequence
        pose_lift_results = inference_pose_lifter_model(
            pose_lift_model,
            pose_results_2d=[pose_det_results],
            dataset=dataset,
            with_track_id=False)

        image_name = pose_det_results[0]['image_name']

        # Pose processing
        pose_lift_results_vis = []
        for idx, res in enumerate(pose_lift_results):
            keypoints_3d = res['keypoints_3d']
            # project to world space
            if camera_params is not None:
                keypoints_3d = _keypoint_camera_to_world(
                    keypoints_3d,
                    camera_params=camera_params,
                    image_name=image_name,
                    dataset=dataset)
            # rebase height (z-axis)
            if args.rebase_keypoint_height:
                keypoints_3d[..., 2] -= np.min(keypoints_3d[..., 2],
                                               axis=-1,
                                               keepdims=True)
            res['keypoints_3d'] = keypoints_3d
            # Add title
            det_res = pose_det_results[idx]
            instance_id = det_res.get('track_id', idx)
            res['title'] = f'Prediction ({instance_id})'
            pose_lift_results_vis.append(res)
            # Add ground truth
            if args.show_ground_truth:
                if 'keypoints_3d' not in det_res:
                    print('Failed to show ground truth. Please make sure that'
                          ' the instance annotations from the Json file'
                          ' contain "keypoints_3d".')
                else:
                    gt = res.copy()
                    gt['keypoints_3d'] = det_res['keypoints_3d']
                    gt['title'] = f'Ground truth ({instance_id})'
                    pose_lift_results_vis.append(gt)

        # Visualization
        if args.out_img_root is None:
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = osp.join(args.out_img_root, f'vis_{i}.jpg')

        vis_3d_pose_result(pose_lift_model,
                           result=pose_lift_results_vis,
                           img=image_name,
                           out_file=out_file)
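
The --rebase-keypoint-height branch above only shifts each pose so that its lowest joint lands at z = 0; the same operation on dummy data:

import numpy as np

keypoints_3d = np.random.rand(17, 4)  # dummy (x, y, z, score) rows
keypoints_3d[..., 2] -= np.min(keypoints_3d[..., 2], axis=-1, keepdims=True)
assert np.isclose(keypoints_3d[..., 2].min(), 0.0)
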
Example #17
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--img', type=str, default='', help='Image file')
    parser.add_argument(
        '--show',
        action='store_true',
        default=False,
        help='whether to show img')
    parser.add_argument(
        '--out-img-root',
        type=str,
        default='',
        help='root of the output img file. '
        'Default not saving the visualization images.')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--det-cat-id',
        type=int,
        default=1,
        help='Category id for bounding box detection model')
    parser.add_argument(
        '--bbox-thr',
        type=float,
        default=0.3,
        help='Bounding box score threshold')
    parser.add_argument(
        '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
    parser.add_argument(
        '--radius',
        type=int,
        default=4,
        help='Keypoint radius for visualization')
    parser.add_argument(
        '--thickness',
        type=int,
        default=1,
        help='Link thickness for visualization')

    assert has_mmdet, 'Please install mmdet to run the demo.'

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')
    assert args.img != ''
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(
        args.det_config, args.det_checkpoint, device=args.device.lower())
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        args.pose_config, args.pose_checkpoint, device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
    else:
        dataset_info = DatasetInfo(dataset_info)

    image_name = os.path.join(args.img_root, args.img)

    # test a single image, the resulting box is (x1, y1, x2, y2)
    mmdet_results = inference_detector(det_model, image_name)

    # keep the person class bounding boxes.
    person_results = process_mmdet_results(mmdet_results, args.det_cat_id)

    # test a single image, with a list of bboxes.

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    pose_results, returned_outputs = inference_top_down_pose_model(
        pose_model,
        image_name,
        person_results,
        bbox_thr=args.bbox_thr,
        format='xyxy',
        dataset=dataset,
        dataset_info=dataset_info,
        return_heatmap=return_heatmap,
        outputs=output_layer_names)

    if args.out_img_root == '':
        out_file = None
    else:
        os.makedirs(args.out_img_root, exist_ok=True)
        out_file = os.path.join(args.out_img_root, f'vis_{args.img}')

    # show the results
    vis_pose_result(
        pose_model,
        image_name,
        pose_results,
        dataset=dataset,
        dataset_info=dataset_info,
        kpt_score_thr=args.kpt_thr,
        radius=args.radius,
        thickness=args.thickness,
        show=args.show,
        out_file=out_file)
Example #18
def test_pose_tracking_demo():
    # COCO demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/top_down/resnet/coco/res50_coco_256x192.py',
        None,
        device='cpu')
    image_name = 'tests/data/coco/000000000785.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(pose_model,
                                                    image_name,
                                                    [[50, 50, 50, 100]],
                                                    format='xywh')
    pose_results, next_id = get_track_id(pose_results, [], next_id=0)
    # show the results
    vis_pose_tracking_result(pose_model, image_name, pose_results)
    pose_results_last = pose_results

    # AIC demo
    pose_model = init_pose_model(
        'configs/top_down/resnet/aic/res50_aic_256x192.py', None, device='cpu')
    image_name = 'tests/data/aic/054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name, [[50, 50, 50, 100]],
        format='xywh',
        dataset='TopDownAicDataset')
    pose_results, next_id = get_track_id(pose_results, pose_results_last,
                                         next_id)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset='TopDownAicDataset')

    # OneHand10K demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/hand/resnet/onehand10k/res50_onehand10k_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/onehand10k/9.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name, [[10, 10, 30, 30]],
        format='xywh',
        dataset='OneHand10KDataset')
    pose_results, next_id = get_track_id(pose_results, pose_results_last,
                                         next_id)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset='OneHand10KDataset')

    # InterHand2D demo
    pose_model = init_pose_model(
        'configs/hand/resnet/interhand2d/res50_interhand2d_all_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/interhand2d/image2017.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name, [[50, 50, 0, 0]],
        format='xywh',
        dataset='InterHand2DDataset')
    pose_results, next_id = get_track_id(pose_results, [], next_id=0)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset='InterHand2DDataset')
    pose_results_last = pose_results

    # MPII demo
    pose_model = init_pose_model(
        'configs/top_down/resnet/mpii/res50_mpii_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/mpii/004645041.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name, [[50, 50, 0, 0]],
        format='xywh',
        dataset='TopDownMpiiDataset')
    pose_results, next_id = get_track_id(pose_results, pose_results_last,
                                         next_id)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset='TopDownMpiiDataset')

    with pytest.raises(NotImplementedError):
        vis_pose_tracking_result(pose_model,
                                 image_name,
                                 pose_results,
                                 dataset='test')
Example #19
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--iou-thr',
                        type=float,
                        default=0.3,
                        help='IoU score threshold')

    assert has_mmdet, 'Please install mmdet to run the demo.'

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device.lower())
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']

    cap = cv2.VideoCapture(args.video_path)

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    next_id = 0
    pose_results = []
    while (cap.isOpened()):
        pose_results_last = pose_results

        flag, img = cap.read()
        if not flag:
            break
        # test a single image, the resulting box is (x1, y1, x2, y2)
        mmdet_results = inference_detector(det_model, img)

        # keep the person class bounding boxes.
        person_results = process_mmdet_results(mmdet_results)

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            img,
            person_results,
            bbox_thr=args.bbox_thr,
            format='xyxy',
            dataset=dataset,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # get track id for each person instance
        pose_results, next_id = get_track_id(pose_results,
                                             pose_results_last,
                                             next_id,
                                             iou_thr=args.iou_thr)

        # show the results
        vis_img = vis_pose_tracking_result(pose_model,
                                           img,
                                           pose_results,
                                           dataset=dataset,
                                           kpt_score_thr=args.kpt_thr,
                                           show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()
Example #20
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--img', type=str, default='', help='Image file')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    assert has_mmdet, 'Please install mmdet to run the demo.'

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')
    assert args.img != ''
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device.lower())
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']

    image_name = os.path.join(args.img_root, args.img)

    # test a single image, the resulting box is (x1, y1, x2, y2)
    mmdet_results = inference_detector(det_model, image_name)

    # keep the person class bounding boxes.
    person_results = process_mmdet_results(mmdet_results)

    # test a single image, with a list of bboxes.

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    pose_results, returned_outputs = inference_top_down_pose_model(
        pose_model,
        image_name,
        person_results,
        bbox_thr=args.bbox_thr,
        format='xyxy',
        dataset=dataset,
        return_heatmap=return_heatmap,
        outputs=output_layer_names)

    if args.out_img_root == '':
        out_file = None
    else:
        os.makedirs(args.out_img_root, exist_ok=True)
        out_file = os.path.join(args.out_img_root, f'vis_{args.img}')

    # show the results
    vis_pose_result(pose_model,
                    image_name,
                    pose_results,
                    dataset=dataset,
                    kpt_score_thr=args.kpt_thr,
                    show=args.show,
                    out_file=out_file)
Example #21
def loop(args, rotate, fname, person_bboxes, pose_model, flipped=False):

    cap = cv2.VideoCapture(args.video_path)

    fps = cap.get(cv2.CAP_PROP_FPS)
    frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    if rotate:
        size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
    else:
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    m_dim = max(size)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    videoWriter = cv2.VideoWriter(fname, fourcc, fps, size)
    poses = np.zeros((frames,
                      pose_model.cfg.channel_cfg['num_output_channels'], 2))
    dataset = pose_model.cfg.data['test']['type']

    skip_ratio = 1

    lmin = 1
    lmax = 0
    rmin = 1
    rmax = 0

    frame = 0
    t0 = time.perf_counter()
    prev_pose = 0
    while (cap.isOpened()):
        t1 = time.perf_counter()
        flag, img = cap.read()
        if rotate:
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
        if flipped:
            img = cv2.flip(img, 1)
        if not flag:
            break

        # check every n-th frame
        if frame % skip_ratio == 0:
            # test a single image, with a list of bboxes.
            pose_results = inference_top_down_pose_model(pose_model, img,
                                                         person_bboxes,
                                                         bbox_thr=args.box_thr,
                                                         format='xyxy',
                                                         dataset=dataset)
            t = time.perf_counter()

            print('Frame {0} out of {1} '.format(frame, frames) +
                  'analysed in {0} secs. '.format(t - t1) +
                  'Total time: {0} secs'.format(t - t0))

            # show the results
            if np.shape(pose_results)[0] > 0:
                prev_pose = pose_results

                ratios = pose_results[0]['keypoints'][:, 0:2] / m_dim

                lmin = min((ratios[13, 1], lmin))
                lmax = max((ratios[13, 1], lmax))
                rmin = min((ratios[14, 1], rmin))
                rmax = max((ratios[14, 1], rmax))

                if not flipped and ((rmax - rmin) > 0.1 or
                                    (frame > 150 and
                                     (rmax - rmin) > (lmax - lmin))):
                    # flipped = True
                    print('Left knee evaluated, restarting ' +
                          'with flipped images...')
                    cap.release()
                    videoWriter.release()
                    cv2.destroyAllWindows()
                    loop(args, rotate, fname, flip_box(person_bboxes, size[0]),
                         pose_model, True)
                    return

                poses[frame, ...] = ratios

            else:
                pose_results = prev_pose  # or maybe just skip saving
                print('no pose detected; reusing previous result')

        else:
            pose_results = prev_pose

        vis_img = vis_pose_result(pose_model, img, pose_results,
                                  dataset=dataset, kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show or frame % skip_ratio == 0:
            cv2.imshow('Image', vis_img)
        frame += 1

        # if save_out_video:
        videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    # if save_out_video:
    videoWriter.release()
    out_file = fname.replace('.mp4', '.npy')
    np.save(out_file, poses)

    cv2.destroyAllWindows()
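
flip_box, used when the loop restarts with mirrored frames, is defined elsewhere in the script. A plausible sketch, assuming xyxy boxes mirrored about the vertical axis of a frame of the given width (an assumption, not the source implementation):

def flip_box(bboxes, width):
    # mirror each xyxy box horizontally; keep any trailing score untouched
    return [[width - b[2], b[1], width - b[0], b[3], *b[4:]] for b in bboxes]

assert flip_box([[10, 0, 30, 50]], 100) == [[70, 0, 90, 50]]
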
Example #22
def main():
    """Visualize the demo images.

    Requires the json_file containing boxes.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--json-file',
                        type=str,
                        default='',
                        help='Json file containing image info.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')

    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    from pycocotools.coco import COCO
    coco = COCO(args.json_file)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    img_keys = list(coco.imgs.keys())

    # process each image
    for i in range(len(img_keys)):
        # get bounding box annotations
        image_id = img_keys[i]
        image = coco.loadImgs(image_id)[0]
        image_name = os.path.join(args.img_root, image['file_name'])
        ann_ids = coco.getAnnIds(image_id)

        # make person bounding boxes
        person_bboxes = []
        for ann_id in ann_ids:
            ann = coco.anns[ann_id]
            # bbox format is 'xywh'
            bbox = ann['bbox']
            person_bboxes.append(bbox)

        # test a single image, with a list of bboxes.
        pose_results = inference_top_down_pose_model(pose_model,
                                                     image_name,
                                                     person_bboxes,
                                                     format='xywh')

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        # show the results
        vis_pose_result(pose_model,
                        image_name,
                        pose_results,
                        skeleton=skeleton,
                        kpt_score_thr=args.kpt_thr,
                        show=args.show,
                        out_file=out_file)
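Note that person_bboxes here is a list of raw xywh lists and the call unpacks a single return value; newer mmpose releases expect each person as a dict and return a (pose_results, returned_outputs) tuple. A small adapter, assuming that newer signature:

# Adapter for newer mmpose versions (assumed signature): wrap the raw
# xywh boxes as dicts and unpack the two-element return value.
person_results = [{'bbox': bbox} for bbox in person_bboxes]
pose_results, _ = inference_top_down_pose_model(
    pose_model, image_name, person_results, format='xywh')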
Exemple #23
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    # parser.add_argument('det_config', help='Config file for detection')
    # parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show', action='store_true', default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root', default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device', default='cpu',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr', type=float, default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr', type=float, default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--file_name', type=str, default='')
    parser.add_argument('--only_box', action='store_true',
                        help='only run the bounding box check, then exit')
    # parser.add_argument('--csv-path', type=str, help='CSV path')

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')
    # assert args.det_config is not None
    # assert args.det_checkpoint is not None

    # build the pose model from a config file and a checkpoint file

    pose_model = init_pose_model(args.pose_config, args.pose_checkpoint,
                                 device=args.device)
    print('loaded pose model')

    dataset = pose_model.cfg.data['test']['type']

    print(dataset)

    mod_used = pose_model.cfg.model['backbone']['type']

    print('model used {0}'.format(mod_used))

    cap = cv2.VideoCapture(args.video_path)
    print('loaded video...')
    print('checking orientation and position')

    flag, img = cap.read()
    cap.release()
    person_bboxes, flip = box_check(img)
    cap = cv2.VideoCapture(args.video_path)

    print(args.only_box)
    if args.only_box:
        # cv2.waitKey(0)
        return

    frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True
        print('save path: {0}'.format(args.out_video_root))

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        if flip:
            size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                    int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
        else:
            size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        m_dim = max(size)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        if args.file_name == '':
            fname = os.path.join(args.out_video_root,
                                 f'vis_{os.path.basename(args.video_path)}')
            fname = fname.replace(fname[fname.find('.', -5)::], '')
            fname += mod_used + dataset + '.mp4'
            print('FN {0}'.format(fname))
            while os.path.isfile(fname):
                fname = fname.replace('.mp4', '')

                idx = fname.find('-', -4)
                if idx == -1:
                    fname += '-0.mp4'
                else:
                    fname = fname.replace(fname[idx + 1::],
                                          str(int(fname[idx + 1::])
                                              + 1) + '.mp4')
        else:
            fname = os.path.join(args.out_video_root, args.file_name)

        print(fname)
        videoWriter = cv2.VideoWriter(fname, fourcc, fps, size)

    print(pose_model.cfg.channel_cfg['num_output_channels'])
    poses = np.zeros((frames,
                      pose_model.cfg.channel_cfg['num_output_channels'], 3))
    # poses[-1, 0:2] = size
    print(poses.shape)

    frame = 0
    t0 = time.perf_counter()
    prev_pose = 0

    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

    print('width: {0}, height: {1}'.format(width, height))

    skip_ratio = 1

    print(person_bboxes)
    lmin = 1
    lmax = 0
    rmin = 1
    rmax = 0
    while (cap.isOpened()):
        t1 = time.perf_counter()
        flag, img = cap.read()
        if not flag:
            break
        if flip:
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)

        # run inference every skip_ratio-th frame (skip_ratio = 1: every frame)
        if frame % skip_ratio == 0:
            # test a single image, the resulting box is (x1, y1, x2, y2)
            # det_results = inference_detector(det_model, img)
            # # keep the person class bounding boxes.
            #
            # person_bboxes = np.expand_dims(
            #     np.array(det_results[0])[0, :], axis=0)
            #
            # print(person_bboxes)

            # test a single image, with a list of bboxes.
            pose_results = inference_top_down_pose_model(pose_model, img,
                                                         person_bboxes,
                                                         bbox_thr=args.bbox_thr,
                                                         format='xyxy',
                                                         dataset=dataset)

            t = time.perf_counter()
            print('Frame {0} out of {3} analysed in {1} secs. '
                  'Total time: {2} secs'.format(frame, t - t1, t - t0, frames))

            # track the results
            if np.shape(pose_results)[0] > 0:
                prev_pose = pose_results
                ratios = pose_results[0]['keypoints'][:, 0:2] / m_dim

                # track the vertical range of the left/right knees (COCO 13/14)
                lmin = min((ratios[13, 1], lmin))
                lmax = max((ratios[13, 1], lmax))
                rmin = min((ratios[14, 1], rmin))
                rmax = max((ratios[14, 1], rmax))
                # NOTE: the body of this condition was empty in the
                # original; `pass` keeps the snippet syntactically valid.
                if (rmax - rmin) > 0.1 or (frame > 150 and
                                           (rmax - rmin) > (lmax - lmin)):
                    pass

                # ratios is (K, 2); fill only the x/y columns
                poses[frame, :, 0:2] = ratios

            else:
                pose_results = prev_pose  # or maybe just skip saving
                print('no pose detected; reusing previous result')

        else:
            pose_results = prev_pose

        vis_img = vis_pose_result(pose_model, img, pose_results,
                                  dataset=dataset, kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show or frame % skip_ratio == 0:
            cv2.imshow('Image', vis_img)
        frame += 1

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
        out_file = fname.replace('.mp4', '.npy')
        np.save(out_file, poses)

    cv2.destroyAllWindows()


if __name__ == '__main__':
    print('starting...')
    main()
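box_check is referenced above but not defined in this listing. A hypothetical sketch consistent with how it is used: it inspects the first frame and returns a full-frame person bbox in xyxy format plus a flag saying whether frames should be rotated 90 degrees before inference.

def box_check(img):
    """Hypothetical stand-in for the undefined helper above."""
    h, w = img.shape[:2]
    flip = w > h  # assume the subject was filmed in portrait orientation
    if flip:
        w, h = h, w  # frame dimensions after cv2.ROTATE_90_CLOCKWISE
    person_bboxes = [[0, 0, w, h, 1.0]]  # (x1, y1, x2, y2, score)
    return person_bboxes, flip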
Exemple #24
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    dataset = pose_model.cfg.data['test']['type']

    cap = cv2.VideoCapture(args.video_path)

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        # size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        #         int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    kp_coco_fn = "coco_kp.json"
    kp_coco = None
    with open(kp_coco_fn) as f:
        kp_coco = json.load(f)

    idx_img = kp_coco["images"][-1]["id"] if len(kp_coco["images"]) > 0 else 0
    idx_ann = kp_coco["annotations"][-1]["id"] if len(
        kp_coco["annotations"]) > 0 else 0

    while (cap.isOpened()):
        images = []
        annotations = []

        flag, img = cap.read()
        # img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
        if not flag:
            break

        time_s = time.time()
        # test a single image, the resulting box is (x1, y1, x2, y2)
        mmdet_results = inference_detector(det_model, img)

        # keep the person class bounding boxes.
        person_bboxes = process_mmdet_results(mmdet_results)

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            img,
            person_bboxes,
            bbox_thr=args.bbox_thr,
            format='xyxy',
            dataset=dataset,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)
        print(1. / (time.time() - time_s))

        # save image and keypoints
        bbox = []
        pose = []
        for res in pose_results:
            bbox.extend(res['bbox'])
            pose.extend(res['keypoints'])

        # show the results

        time_stamp = time.time()
        img_name = "{}.jpg".format(time_stamp)
        # mmcv.imwrite(img, img_name)
        height, width, channels = img.shape
        idx_img = idx_img + 1

        now = datetime.datetime.now()
        img_obj = dict({
            "license": 4,
            "file_name": img_name,
            "height": height,
            "width": width,
            "date_captured": now.strftime('%Y-%m-%d %H:%M:%S'),
            "id": idx_img
        })

        images.append(img_obj)

        keypoints = []
        idx_ann = idx_ann + 1
        for po in pose:
            x, y, c = po
            keypoints.append([int(x), int(y), 1.0])
            # keypoints.extend([int(x), int(y), 2]) # visible

        bboxes = []
        for bb in bbox:
            # pose results use xyxy here; COCO annotations expect xywh
            x1, y1, x2, y2, c = bb
            bboxes.append([int(x1), int(y1), int(x2 - x1), int(y2 - y1), 1.0])

        anno_obj = dict({
            "num_keypoints": 1,
            "iscrowd": 0,
            "bbox": bboxes,
            "keypoints": keypoints,
            "category_id": 1,
            "image_id": idx_img,
            "id": idx_ann
        })

        annotations.append(anno_obj)

        kp_coco["annotations"] = annotations
        kp_coco["images"] = images
        vis_img = img  # fall back to the raw frame when nothing is detected
        if len(bboxes) > 0:
            kp_coco_angles = pair_angles(kp_coco, dict_angles)

            vis_img = show_result_angles(img,
                                         kp_coco['annotations'],
                                         skeleton,
                                         classnames,
                                         angles_list=kp_coco_angles,
                                         pose_kpt_color=pose_kpt_color,
                                         pose_limb_color=pose_limb_color)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()
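The script assumes coco_kp.json already exists with "images" and "annotations" arrays. A minimal bootstrap sketch; only the two arrays are read above, so the "categories" entry is an assumption:

import json

scaffold = {
    'images': [],
    'annotations': [],
    'categories': [{'id': 1, 'name': 'person'}],  # assumed, not read above
}
with open('coco_kp.json', 'w') as f:
    json.dump(scaffold, f)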
Exemple #25
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cpu',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device)
    print('loaded detection model')
    # build the pose model from a config file and a checkpoint file
    print('pose config: {0} \npose checkpoint: {1}'.format(
        args.pose_config, args.pose_checkpoint))
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)
    print('loaded pose model')

    dataset = pose_model.cfg.data['test']['type']

    print(dataset)

    cap = cv2.VideoCapture(args.video_path)

    print('loaded video')

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True
        print('save path: {0}'.format(args.out_video_root))

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    count = 0
    t0 = time.perf_counter()
    while (cap.isOpened()):
        t1 = time.perf_counter()
        flag, img = cap.read()

        if not flag:
            break
        # test a single image, the resulting box is (x1, y1, x2, y2)
        det_results = inference_detector(det_model, img)
        # keep the person class bounding boxes.
        person_bboxes = det_results[0].copy()

        # test a single image, with a list of bboxes.
        pose_results = inference_top_down_pose_model(pose_model,
                                                     img,
                                                     person_bboxes,
                                                     bbox_thr=args.bbox_thr,
                                                     format='xyxy',
                                                     dataset=dataset)

        count += 1
        t = time.perf_counter()
        print('Frame {0} analysed in {1} secs. '
              'Total time: {2} secs'.format(count, t - t1, t - t0))

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show or count == 3:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()
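Here det_results[0] is the raw (N, 5) person array (class index 0 for COCO-trained mmdet models) and filtering is left to bbox_thr inside the pose API. A sketch of pre-filtering the detections yourself, assuming that array layout:

import numpy as np

def filter_person_bboxes(det_results, score_thr=0.3):
    """Keep person detections above a score threshold.

    Assumes det_results[0] is an (N, 5) array of
    (x1, y1, x2, y2, score) rows, as returned by inference_detector
    for COCO-trained detectors.
    """
    bboxes = np.asarray(det_results[0])
    return bboxes[bboxes[:, 4] > score_thr]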
Exemple #26
def main():
    """Visualize the demo images.
    Input image edge coordinates as bbox. 
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')
    assert 'cuda' in args.device

    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    img_name_list = []
    file_list = os.listdir(args.img_root)
    for file_name in sorted(file_list):
        if '.jpg' in file_name:
            img_name_list.append(file_name)
    save_list = []
    # process each image
    for i, img_name in enumerate(img_name_list):
        img_path = os.path.join(args.img_root, img_name)
        img = Image.open(img_path)
        width, height = img.size

        # make person bounding boxes: [x,y,width,height]
        person_bboxes = []
        person_bboxes.append([
            int(width * 5 / 110),
            int(height * 5 / 110),
            int(width * 100 / 110),
            int(height * 100 / 110)
        ])

        # test a single image, with a list of bboxes.
        pose_results = inference_top_down_pose_model(pose_model,
                                                     img_path,
                                                     person_bboxes,
                                                     format='xywh')
        print(len(pose_results[0]['keypoints'].tolist()))
        save_list.append(pose_results[0]['keypoints'].tolist())

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        # show the results
        vis_pose_result(pose_model,
                        img_path,
                        pose_results,
                        kpt_score_thr=args.kpt_thr,
                        show=args.show,
                        out_file=out_file)
    json_string = json.dumps(save_list, indent=2)
    with open(os.path.join(args.out_img_root, 'results.json'), "w") as f:
        f.write(json_string)
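The hard-coded 5/110 margins above can be factored into a small helper (hypothetical, not part of the original):

def full_image_bbox(width, height, margin=0.045):
    """Return an xywh bbox covering the image minus a relative margin.

    margin=0.045 roughly reproduces the 5/110 offsets used above.
    """
    x = int(width * margin)
    y = int(height * margin)
    return [x, y,
            int(width * (1 - 2 * margin)),
            int(height * (1 - 2 * margin))]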
Exemple #27
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    assert has_face_det, 'Please install face_recognition to run the demo: '\
                         '"pip install face_recognition". For more details, '\
                         'see https://github.com/ageitgey/face_recognition'

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
    else:
        dataset_info = DatasetInfo(dataset_info)

    cap = cv2.VideoCapture(args.video_path)
    assert cap.isOpened(), f'Failed to load video file {args.video_path}'

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    while (cap.isOpened()):
        flag, img = cap.read()
        if not flag:
            break

        face_det_results = face_recognition.face_locations(
            cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        face_results = process_face_det_results(face_det_results)

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            img,
            face_results,
            bbox_thr=None,
            format='xyxy',
            dataset=dataset,
            dataset_info=dataset_info,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  radius=args.radius,
                                  thickness=args.thickness,
                                  dataset=dataset,
                                  dataset_info=dataset_info,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if args.show and cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    if args.show:
        cv2.destroyAllWindows()
Exemple #28
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--det-cat-id',
                        type=int,
                        default=1,
                        help='Category id for bounding box detection model')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--use-oks-tracking',
                        action='store_true',
                        help='Using OKS tracking')
    parser.add_argument('--tracking-thr',
                        type=float,
                        default=0.3,
                        help='Tracking threshold')
    parser.add_argument('--euro',
                        action='store_true',
                        help='Using One_Euro_Filter for smoothing')
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    assert has_mmdet, 'Please install mmdet to run the demo.'

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device.lower())
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
    else:
        dataset_info = DatasetInfo(dataset_info)

    cap = cv2.VideoCapture(args.video_path)
    fps = None

    assert cap.isOpened(), f'Failed to load video file {args.video_path}'

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    next_id = 0
    pose_results = []
    while (cap.isOpened()):
        pose_results_last = pose_results

        flag, img = cap.read()
        if not flag:
            break
        # test a single image, the resulting box is (x1, y1, x2, y2)
        mmdet_results = inference_detector(det_model, img)

        # keep the person class bounding boxes.
        person_results = process_mmdet_results(mmdet_results, args.det_cat_id)

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            img,
            person_results,
            bbox_thr=args.bbox_thr,
            format='xyxy',
            dataset=dataset,
            dataset_info=dataset_info,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # get track id for each person instance
        pose_results, next_id = get_track_id(pose_results,
                                             pose_results_last,
                                             next_id,
                                             use_oks=args.use_oks_tracking,
                                             tracking_thr=args.tracking_thr,
                                             use_one_euro=args.euro,
                                             fps=fps)

        # show the results
        vis_img = vis_pose_tracking_result(pose_model,
                                           img,
                                           pose_results,
                                           radius=args.radius,
                                           thickness=args.thickness,
                                           dataset=dataset,
                                           dataset_info=dataset_info,
                                           kpt_score_thr=args.kpt_thr,
                                           show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if args.show and cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    if args.show:
        cv2.destroyAllWindows()
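process_mmdet_results (used with args.det_cat_id above) is also external to this listing. A sketch consistent with the helper shipped in the mmpose demos; treat it as an approximation:

def process_mmdet_results(mmdet_results, cat_id=1):
    """Keep the detections of one category as person results.

    cat_id is 1-based (1 = 'person' for COCO-trained detectors).
    Mask-based models return a (bbox_results, segm_results) tuple,
    so take the bbox part first.
    """
    if isinstance(mmdet_results, tuple):
        det_results = mmdet_results[0]
    else:
        det_results = mmdet_results
    person_results = []
    for bbox in det_results[cat_id - 1]:
        person_results.append({'bbox': bbox})
    return person_results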
Exemple #29
def main():
    """Visualize the demo images.

    Require the json_file containing boxes.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--json-file',
                        type=str,
                        default='',
                        help='Json file containing image info.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=None,
                        help='Bounding box score threshold '
                        '(None keeps all ground-truth boxes)')

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')

    coco = COCO(args.json_file)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    dataset = pose_model.cfg.data['test']['type']

    img_keys = list(coco.imgs.keys())

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    # process each image
    for i in range(len(img_keys)):
        # get bounding box annotations
        image_id = img_keys[i]
        image = coco.loadImgs(image_id)[0]
        image_name = os.path.join(args.img_root, image['file_name'])
        ann_ids = coco.getAnnIds(image_id)

        # make person bounding boxes
        person_bboxes = []
        for ann_id in ann_ids:
            ann = coco.anns[ann_id]
            # bbox format is 'xywh'
            bbox = ann['bbox']
            person_bboxes.append(bbox)

        # test a single image, with a list of bboxes
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            image_name,
            person_bboxes,
            bbox_thr=args.bbox_thr,
            format='xywh',
            dataset=dataset,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        vis_pose_result(pose_model,
                        image_name,
                        pose_results,
                        dataset=dataset,
                        kpt_score_thr=args.kpt_thr,
                        show=args.show,
                        out_file=out_file)
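To actually use the optional outputs declared above, flip the two switches. In the mmpose versions this demo targets, each element of returned_outputs is (assumed) a dict of layer name to feature map, with the predicted heatmaps included when return_heatmap=True; exact keys may differ between versions:

return_heatmap = True
output_layer_names = ('backbone', )
pose_results, returned_outputs = inference_top_down_pose_model(
    pose_model,
    image_name,
    person_bboxes,
    bbox_thr=args.bbox_thr,
    format='xywh',
    dataset=dataset,
    return_heatmap=return_heatmap,
    outputs=output_layer_names)
for layer_outputs in returned_outputs:
    for name, feat in layer_outputs.items():
        print(name, getattr(feat, 'shape', type(feat)))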
Exemple #30
def main():
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument(
        'pose_detector_config',
        type=str,
        default=None,
        help='Config file for the 1st stage 2D pose detector')
    parser.add_argument(
        'pose_detector_checkpoint',
        type=str,
        default=None,
        help='Checkpoint file for the 1st stage 2D pose detector')
    parser.add_argument(
        'pose_lifter_config',
        help='Config file for the 2nd stage pose lifter model')
    parser.add_argument(
        'pose_lifter_checkpoint',
        help='Checkpoint file for the 2nd stage pose lifter model')
    parser.add_argument(
        '--video-path', type=str, default='', help='Video path')
    parser.add_argument(
        '--rebase-keypoint-height',
        action='store_true',
        help='Rebase the predicted 3D pose so its lowest keypoint has a '
        'height of 0 (landing on the ground). This is useful for '
        'visualization when the model does not predict the global position '
        'of the 3D pose.')
    parser.add_argument(
        '--norm-pose-2d',
        action='store_true',
        help='Scale the bbox (along with the 2D pose) to the average bbox '
        'scale of the dataset, and move the bbox (along with the 2D pose) to '
        'the average bbox center of the dataset. This is useful when bbox '
        'is small, especially in multi-person scenarios.')
    parser.add_argument(
        '--num-instances',
        type=int,
        default=-1,
        help='The number of 3D poses to be visualized in every frame. If '
        'less than 0, it will be set to the number of pose results in the '
        'first frame.')
    parser.add_argument(
        '--show',
        action='store_true',
        default=False,
        help='whether to show visualizations.')
    parser.add_argument(
        '--out-video-root',
        type=str,
        default='',
        help='Root of the output video file. '
        'Default not saving the visualization video.')
    parser.add_argument(
        '--device', default='cuda:0', help='Device for inference')
    parser.add_argument(
        '--det-cat-id',
        type=int,
        default=1,
        help='Category id for bounding box detection model')
    parser.add_argument(
        '--bbox-thr',
        type=float,
        default=0.9,
        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr', type=float, default=0.3)
    parser.add_argument(
        '--use-oks-tracking', action='store_true', help='Using OKS tracking')
    parser.add_argument(
        '--tracking-thr', type=float, default=0.3, help='Tracking threshold')
    parser.add_argument(
        '--euro',
        action='store_true',
        help='Using One_Euro_Filter for smoothing')
    parser.add_argument(
        '--radius',
        type=int,
        default=8,
        help='Keypoint radius for visualization')
    parser.add_argument(
        '--thickness',
        type=int,
        default=2,
        help='Link thickness for visualization')

    assert has_mmdet, 'Please install mmdet to run the demo.'

    args = parser.parse_args()
    assert args.show or (args.out_video_root != '')
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    video = mmcv.VideoReader(args.video_path)
    assert video.opened, f'Failed to load video file {args.video_path}'

    # First stage: 2D pose detection
    print('Stage 1: 2D pose detection.')

    person_det_model = init_detector(
        args.det_config, args.det_checkpoint, device=args.device.lower())

    pose_det_model = init_pose_model(
        args.pose_detector_config,
        args.pose_detector_checkpoint,
        device=args.device.lower())

    assert pose_det_model.cfg.model.type == 'TopDown', 'Only "TopDown" ' \
        'model is supported for the 1st stage (2D pose detection)'

    pose_det_dataset = pose_det_model.cfg.data['test']['type']

    pose_det_results_list = []
    next_id = 0
    pose_det_results = []
    for frame in video:
        pose_det_results_last = pose_det_results

        # test a single image, the resulting box is (x1, y1, x2, y2)
        mmdet_results = inference_detector(person_det_model, frame)

        # keep the person class bounding boxes.
        person_det_results = process_mmdet_results(mmdet_results,
                                                   args.det_cat_id)

        # make person results for single image
        pose_det_results, _ = inference_top_down_pose_model(
            pose_det_model,
            frame,
            person_det_results,
            bbox_thr=args.bbox_thr,
            format='xyxy',
            dataset=pose_det_dataset,
            return_heatmap=False,
            outputs=None)

        # get track id for each person instance
        pose_det_results, next_id = get_track_id(
            pose_det_results,
            pose_det_results_last,
            next_id,
            use_oks=args.use_oks_tracking,
            tracking_thr=args.tracking_thr,
            use_one_euro=args.euro,
            fps=video.fps)

        pose_det_results_list.append(copy.deepcopy(pose_det_results))

    # Second stage: Pose lifting
    print('Stage 2: 2D-to-3D pose lifting.')

    pose_lift_model = init_pose_model(
        args.pose_lifter_config,
        args.pose_lifter_checkpoint,
        device=args.device.lower())

    assert pose_lift_model.cfg.model.type == 'PoseLifter', \
        'Only "PoseLifter" model is supported for the 2nd stage ' \
        '(2D-to-3D lifting)'
    pose_lift_dataset = pose_lift_model.cfg.data['test']['type']

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        fps = video.fps
        writer = None

    # convert keypoint definition
    for pose_det_results in pose_det_results_list:
        for res in pose_det_results:
            keypoints = res['keypoints']
            res['keypoints'] = covert_keypoint_definition(
                keypoints, pose_det_dataset, pose_lift_dataset)

    # load temporal padding config from model.data_cfg
    if hasattr(pose_lift_model.cfg, 'test_data_cfg'):
        data_cfg = pose_lift_model.cfg.test_data_cfg
    else:
        data_cfg = pose_lift_model.cfg.data_cfg

    num_instances = args.num_instances
    for i, pose_det_results in enumerate(
            mmcv.track_iter_progress(pose_det_results_list)):
        # extract and pad input pose2d sequence
        pose_results_2d = extract_pose_sequence(
            pose_det_results_list,
            frame_idx=i,
            causal=data_cfg.causal,
            seq_len=data_cfg.seq_len,
            step=data_cfg.seq_frame_interval)
        # 2D-to-3D pose lifting
        pose_lift_results = inference_pose_lifter_model(
            pose_lift_model,
            pose_results_2d=pose_results_2d,
            dataset=pose_lift_dataset,
            with_track_id=True,
            image_size=video.resolution,
            norm_pose_2d=args.norm_pose_2d)

        # Pose processing
        pose_lift_results_vis = []
        for idx, res in enumerate(pose_lift_results):
            keypoints_3d = res['keypoints_3d']
            # exchange y,z-axis, and then reverse the direction of x,z-axis
            keypoints_3d = keypoints_3d[..., [0, 2, 1]]
            keypoints_3d[..., 0] = -keypoints_3d[..., 0]
            keypoints_3d[..., 2] = -keypoints_3d[..., 2]
            # rebase height (z-axis)
            if args.rebase_keypoint_height:
                keypoints_3d[..., 2] -= np.min(
                    keypoints_3d[..., 2], axis=-1, keepdims=True)
            res['keypoints_3d'] = keypoints_3d
            # add title
            det_res = pose_det_results[idx]
            instance_id = det_res['track_id']
            res['title'] = f'Prediction ({instance_id})'
            # only visualize the target frame
            res['keypoints'] = det_res['keypoints']
            res['bbox'] = det_res['bbox']
            res['track_id'] = instance_id
            pose_lift_results_vis.append(res)

        # Visualization
        if num_instances < 0:
            num_instances = len(pose_lift_results_vis)
        img_vis = vis_3d_pose_result(
            pose_lift_model,
            result=pose_lift_results_vis,
            img=video[i],
            out_file=None,
            radius=args.radius,
            thickness=args.thickness,
            num_instances=num_instances)

        if save_out_video:
            if writer is None:
                writer = cv2.VideoWriter(
                    osp.join(args.out_video_root,
                             f'vis_{osp.basename(args.video_path)}'), fourcc,
                    fps, (img_vis.shape[1], img_vis.shape[0]))
            writer.write(img_vis)

    if save_out_video:
        writer.release()
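covert_keypoint_definition (the original's spelling) is not shown in this listing. A sketch of the COCO (17 keypoints) to H36M (17 joints) conversion used by mmpose's two-stage demo; the indices are best-effort from that demo, so verify them against the actual script:

import numpy as np

def covert_keypoint_definition(keypoints, pose_det_dataset,
                               pose_lift_dataset):
    """Sketch of the COCO -> H36M keypoint remapping."""
    assert pose_det_dataset == 'TopDownCocoDataset'
    assert pose_lift_dataset == 'Body3DH36MDataset'
    keypoints_new = np.zeros((17, keypoints.shape[1]))
    # pelvis is the midpoint of the hips, thorax of the shoulders
    keypoints_new[0] = (keypoints[11] + keypoints[12]) / 2
    keypoints_new[8] = (keypoints[5] + keypoints[6]) / 2
    # head is the midpoint of the eyes, spine of thorax and pelvis
    keypoints_new[10] = (keypoints[1] + keypoints[2]) / 2
    keypoints_new[7] = (keypoints_new[0] + keypoints_new[8]) / 2
    # remap the remaining joints (nose, limbs) from COCO order
    keypoints_new[[1, 2, 3, 4, 5, 6, 9, 11, 12, 13, 14, 15, 16]] = \
        keypoints[[12, 14, 16, 11, 13, 15, 0, 5, 7, 9, 6, 8, 10]]
    return keypoints_new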