Example #1
def test_top_down_demo():
    # COCO demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/top_down/resnet/coco/res50_coco_256x192.py',
        None,
        device='cpu')
    image_name = 'tests/data/coco/000000000785.jpg'

    person_result = []
    person_result.append({'bbox': [50, 50, 50, 100]})
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model, image_name, person_result, format='xywh')
    # show the results
    vis_pose_result(pose_model, image_name, pose_results)

    # AIC demo
    pose_model = init_pose_model(
        'configs/top_down/resnet/aic/res50_aic_256x192.py', None, device='cpu')
    image_name = 'tests/data/aic/054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name,
        person_result,
        format='xywh',
        dataset='TopDownAicDataset')
    # show the results
    vis_pose_result(
        pose_model, image_name, pose_results, dataset='TopDownAicDataset')

    # OneHand10K demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/hand/resnet/onehand10k/res50_onehand10k_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/onehand10k/9.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name,
        person_result,
        format='xywh',
        dataset='OneHand10KDataset')
    # show the results
    vis_pose_result(
        pose_model, image_name, pose_results, dataset='OneHand10KDataset')

    with pytest.raises(NotImplementedError):
        pose_results, _ = inference_top_down_pose_model(
            pose_model,
            image_name,
            person_result,
            format='xywh',
            dataset='test')
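A minimal sketch, assuming the MMPose 0.x API used above, of inspecting the returned results:

# Each element of pose_results is a dict with a 'bbox' and a 'keypoints'
# array of shape (num_keypoints, 3), where each row is (x, y, score).
for person in pose_results:
    print('bbox:', person['bbox'])
    print('keypoints shape:', person['keypoints'].shape)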
Example #2
def main():
    """Visualize the demo images.
    Input image edge coordinates as bbox.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()
    assert 'cuda' in args.device

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    for scene in SCENE_NAMES:
        print('Processing scene: ', scene)
        scene_root = os.path.join(ROOT_DIR, scene)
        with open(os.path.join(scene_root, scene + '.json'), 'r') as load_f:
            batch_labels = json.load(load_f)
        save_dict = {}
        for pid in batch_labels.keys():
            if batch_labels[pid]:
                print('Processing scene: {} person: {}'.format(scene, pid))
                save_dict[pid] = []
                for batch in batch_labels[pid]:
                    buffer = []
                    images = batch['images']
                    # process each image
                    for img_name in images:
                        img_path = os.path.join(scene_root, pid, img_name)
                        img = Image.open(img_path)
                        width, height = img.size

                        # make person bounding boxes: [x,y,width,height]
                        person_bboxes = [[
                            int(width * 5 / 110),
                            int(height * 5 / 110),
                            int(width * 100 / 110),
                            int(height * 100 / 110)
                        ]]

                        # pose estimate on a single image.
                        pose_results = inference_top_down_pose_model(
                            pose_model, img_path, person_bboxes, format='xywh')
                        buffer.append(pose_results[0]['keypoints'].tolist())
                    save_dict[pid].append(buffer)
        json_string = json.dumps(save_dict, indent=2)
        with open(os.path.join(scene_root, scene + '_skeletons.json'),
                  'w') as f:
            f.write(json_string)
        break  # NOTE: only the first scene is processed
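A hedged reading of the output layout that main() writes (names follow the code above):

# <scene>_skeletons.json maps each person id to a list of batches, where
# each batch is a list of per-image keypoint lists:
# {
#   "<pid>": [
#     [ [[x, y, score], ...],   # keypoints for image 1 of the batch
#       [[x, y, score], ...] ]  # keypoints for image 2, ...
#   ],
#   ...
# }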
Example #3
def test_bottom_up_pose_tracking_demo():
    # COCO demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/associative_embedding/'
        'coco/res50_coco_512x512.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'

    pose_results, _ = inference_bottom_up_pose_model(pose_model, image_name)

    pose_results, next_id = get_track_id(pose_results, [], next_id=0)

    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset='BottomUpCocoDataset')

    pose_results_last = pose_results

    # oks
    pose_results, next_id = get_track_id(pose_results,
                                         pose_results_last,
                                         next_id=next_id,
                                         use_oks=True)

    pose_results_last = pose_results
    # one_euro
    pose_results, next_id = get_track_id(pose_results,
                                         pose_results_last,
                                         next_id=next_id,
                                         use_one_euro=True)
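A minimal sketch, assuming the get_track_id behaviour shown above, of consuming the tracking output:

# get_track_id assigns a 'track_id' to every result by matching it against
# the previous frame; next_id is the id the next unseen person will receive.
for person in pose_results:
    print('track id:', person['track_id'])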
Example #4
def main(args):
    os.makedirs(args.out_dir, exist_ok=True)

    # Inference single image by native apis.
    model = init_pose_model(args.config, args.checkpoint, device=args.device)
    if isinstance(model, TopDown):
        pytorch_result, _ = inference_top_down_pose_model(model,
                                                          args.img,
                                                          person_results=None)
    elif isinstance(model, (AssociativeEmbedding, )):
        pytorch_result, _ = inference_bottom_up_pose_model(model, args.img)
    else:
        raise NotImplementedError()

    vis_pose_result(model,
                    args.img,
                    pytorch_result,
                    out_file=osp.join(args.out_dir, 'pytorch_result.png'))

    # Inference single image by torchserve engine.
    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.img, 'rb') as image:
        response = requests.post(url, image)
    server_result = response.json()

    vis_pose_result(model,
                    args.img,
                    server_result,
                    out_file=osp.join(args.out_dir, 'torchserve_result.png'))
Example #5
def test_bottom_up_demo():

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/associative_embedding/'
        'coco/res50_coco_512x512.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'
    dataset_info = DatasetInfo(pose_model.cfg.data['test'].get(
        'dataset_info', None))

    pose_results, _ = inference_bottom_up_pose_model(pose_model,
                                                     image_name,
                                                     dataset_info=dataset_info)

    # show the results
    vis_pose_result(pose_model,
                    image_name,
                    pose_results,
                    dataset_info=dataset_info)

    # test dataset_info without sigmas
    pose_model_copy = copy.deepcopy(pose_model)

    pose_model_copy.cfg.data.test.dataset_info.pop('sigmas')
    dataset_info_no_sigmas = DatasetInfo(
        pose_model_copy.cfg.data['test']['dataset_info'])
    pose_results, _ = inference_bottom_up_pose_model(
        pose_model_copy, image_name, dataset_info=dataset_info_no_sigmas)
Example #6
    def __init__(self,
                 name: str,
                 model_config: str,
                 model_checkpoint: str,
                 input_buffer: str,
                 output_buffer: Union[str, List[str]],
                 enable_key: Optional[Union[str, int]] = None,
                 enable: bool = True,
                 device: str = 'cuda:0',
                 min_frame: int = 16,
                 fps: int = 30,
                 score_thr: float = 0.7,
                 multi_input: bool = True):

        super().__init__(
            name=name,
            enable_key=enable_key,
            enable=enable,
            multi_input=multi_input)

        self._clip_buffer = []  # items: (clip message, num of frames)
        self.score_thr = score_thr
        self.min_frame = min_frame
        self.fps = fps

        # Init model
        self.model_config = model_config
        self.model_checkpoint = model_checkpoint
        self.device = device.lower()
        self.model = init_pose_model(
            self.model_config, self.model_checkpoint, device=self.device)

        # Register buffers
        self.register_input_buffer(input_buffer, 'input', trigger=True)
        self.register_output_buffer(output_buffer)
Example #7
    def __init__(
            self,
            name: str,
            det_model_config: str,
            det_model_checkpoint: str,
            pose_model_config: str,
            pose_model_checkpoint: str,
            input_buffer: str,
            output_buffer: Union[str, List[str]],
            enable_key: Optional[Union[str, int]] = None,
            enable: bool = True,
            device: str = 'cuda:0',
            det_interval: int = 1,
            class_ids: Optional[List] = None,
            labels: Optional[List] = None,
            bbox_thr: float = 0.5,
            kpt2bbox_cfg: Optional[dict] = None,
            smooth: bool = False,
            smooth_filter_cfg: str = 'configs/_base_/filters/one_euro.py'):

        assert has_mmdet, \
            f'MMDetection is required for {self.__class__.__name__}.'

        super().__init__(name=name, enable_key=enable_key, enable=enable)

        self.det_model_config = get_config_path(det_model_config, 'mmdet')
        self.det_model_checkpoint = det_model_checkpoint
        self.pose_model_config = get_config_path(pose_model_config, 'mmpose')
        self.pose_model_checkpoint = pose_model_checkpoint
        self.device = device.lower()
        self.class_ids = class_ids
        self.labels = labels
        self.bbox_thr = bbox_thr
        self.det_interval = det_interval

        if not kpt2bbox_cfg:
            kpt2bbox_cfg = self.default_kpt2bbox_cfg
        self.kpt2bbox_cfg = copy.deepcopy(kpt2bbox_cfg)

        self.det_countdown = 0
        self.track_info = TrackInfo()

        if smooth:
            smooth_filter_cfg = get_config_path(smooth_filter_cfg, 'mmpose')
            self.smoother = Smoother(smooth_filter_cfg, keypoint_dim=2)
        else:
            self.smoother = None

        # init models
        self.det_model = init_detector(self.det_model_config,
                                       self.det_model_checkpoint,
                                       device=self.device)

        self.pose_model = init_pose_model(self.pose_model_config,
                                          self.pose_model_checkpoint,
                                          device=self.device)

        # register buffers
        self.register_input_buffer(input_buffer, 'input', trigger=True)
        self.register_output_buffer(output_buffer)
Example #8
def pose_inference(args, frame_paths, det_results):
    model = init_pose_model(args.pose_config, args.pose_checkpoint,
                            args.device)
    ret = []
    print('Performing Human Pose Estimation for each frame')
    prog_bar = mmcv.ProgressBar(len(frame_paths))
    for f, d in zip(frame_paths, det_results):
        # Align input format
        d = [dict(bbox=x) for x in list(d)]
        pose = inference_top_down_pose_model(model, f, d, format='xyxy')[0]
        ret.append(pose)
        prog_bar.update()
    return ret
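A hypothetical usage sketch; the config path, checkpoint, and sample detection below are placeholders rather than part of the original snippet:

from argparse import Namespace

# Placeholders: a single frame and one fake person detection
# (x1, y1, x2, y2, score); a real pipeline would supply these per frame.
frame_paths = ['tests/data/coco/000000000785.jpg']
det_results = [[[50, 50, 250, 400, 0.9]]]

args = Namespace(pose_config='res50_coco_256x192.py',  # placeholder
                 pose_checkpoint=None,                 # placeholder
                 device='cpu')
poses = pose_inference(args, frame_paths, det_results)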
Example #9
def test_interhand3d_demo():
    # H36M demo
    pose_model = init_pose_model(
        'configs/hand/3d_kpt_sview_rgb_img/internet/interhand3d/'
        'res50_interhand3d_all_256x256.py',
        None,
        device='cpu')

    image_name = 'tests/data/interhand2.6m/image2017.jpg'
    det_result = {
        'image_name': image_name,
        'bbox': [50, 50, 50, 50],  # bbox format is 'xywh'
        'camera_param': None,
        'keypoints_3d_gt': None
    }
    det_results = [det_result]
    dataset = pose_model.cfg.data['test']['type']
    dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])

    pose_results = inference_interhand_3d_model(pose_model,
                                                image_name,
                                                det_results,
                                                dataset=dataset)

    for res in pose_results:
        res['title'] = 'title'

    vis_3d_pose_result(
        pose_model,
        result=pose_results,
        img=det_results[0]['image_name'],
        dataset_info=dataset_info,
    )

    # test special cases
    # Empty det results
    _ = inference_interhand_3d_model(pose_model,
                                     image_name, [],
                                     dataset=dataset)

    if torch.cuda.is_available():
        _ = inference_interhand_3d_model(pose_model.cuda(),
                                         image_name,
                                         det_results,
                                         dataset=dataset)

    with pytest.raises(NotImplementedError):
        _ = inference_interhand_3d_model(pose_model,
                                         image_name,
                                         det_results,
                                         dataset='test')
Example #10
def test_bottom_up_demo():

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/bottom_up/resnet/coco/res50_coco_512x512.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'

    pose_results, _ = inference_bottom_up_pose_model(pose_model, image_name)

    # show the results
    vis_pose_result(
        pose_model, image_name, pose_results, dataset='BottomUpCocoDataset')
Example #11
    def initialize(self, context):
        properties = context.system_properties
        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        if torch.cuda.is_available():
            self.device = torch.device(
                f"{self.map_location}:{properties.get('gpu_id')}")
        else:
            self.device = torch.device(self.map_location)
        self.manifest = context.manifest

        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        self.config_file = os.path.join(model_dir, 'config.py')

        self.model = init_pose_model(self.config_file, checkpoint, self.device)
        self.initialized = True
Example #12
    def __init__(self, pose_c, pose_w, device):
        self.model_w = pose_w
        self.device = device
        self.pose_model = init_pose_model(pose_c, pose_w, device)
        self.pose_model.export = True  # set export and return convolution result
        self.dst_w = 192
        self.dst_h = 256
        self.input_size = [self.dst_w, self.dst_h]
        self.img_rgb = None
        self.img_p = None
        self.img_t = None
        self.img_s = None
        self.p_boxes = []
        # ImageNet normalization statistics (RGB)
        self.mean = [0.485, 0.456, 0.406]
        self.std = [0.229, 0.224, 0.225]
        self.preds = []
        self.maxvals = []
Example #13
def test_bottom_up_demo():
    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/bottom_up/resnet/coco/res50_coco_512x512.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'

    pose_results = inference_bottom_up_pose_model(pose_model, image_name)

    # show the results
    vis_pose_result(pose_model, image_name, pose_results, skeleton=skeleton)
Example #14
def pose_inference(args, frame_paths, det_results):
    model = init_pose_model(args.pose_config, args.pose_checkpoint,
                            args.device)
    print('Performing Human Pose Estimation for each frame')
    prog_bar = mmcv.ProgressBar(len(frame_paths))

    num_frame, num_person = det_results.shape[:2]
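    # kp[j, i] will hold the 17 COCO keypoints of person j in frame i,
    # each row being (x, y, score)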
    kp = np.zeros((num_person, num_frame, 17, 3), dtype=np.float32)

    for i, (f, d) in enumerate(zip(frame_paths, det_results)):
        # Align input format
        d = [dict(bbox=x) for x in list(d) if x[-1] > 0.5]
        pose = inference_top_down_pose_model(model, f, d, format='xyxy')[0]
        for j, item in enumerate(pose):
            kp[j, i] = item['keypoints']
        prog_bar.update()
    return kp
Example #15
def test_pose_lifter_demo():
    # H36M demo
    pose_model = init_pose_model(
        'configs/body/3d_kpt_sview_rgb_img/pose_lift/'
        'h36m/simplebaseline3d_h36m.py',
        None,
        device='cpu')

    pose_det_result = {
        'keypoints': np.zeros((17, 3)),
        'bbox': [50, 50, 50, 50],
        'track_id': 0,
        'image_name': 'tests/data/h36m/S1_Directions_1.54138969_000001.jpg',
    }

    pose_results_2d = [[pose_det_result]]

    dataset = pose_model.cfg.data['test']['type']

    _ = inference_pose_lifter_model(
        pose_model, pose_results_2d, dataset, with_track_id=False)

    pose_lift_results = inference_pose_lifter_model(
        pose_model, pose_results_2d, dataset, with_track_id=True)

    for res in pose_lift_results:
        res['title'] = 'title'
    vis_3d_pose_result(
        pose_model,
        pose_lift_results,
        img=pose_lift_results[0]['image_name'],
        dataset=dataset)

    # test special cases
    # Empty 2D results
    _ = inference_pose_lifter_model(
        pose_model, [[]], dataset, with_track_id=False)

    if torch.cuda.is_available():
        _ = inference_pose_lifter_model(
            pose_model.cuda(), pose_results_2d, dataset, with_track_id=False)

    with pytest.raises(NotImplementedError):
        _ = inference_pose_lifter_model(
            pose_model, pose_results_2d, dataset='test')
Example #16
def test_top_down_demo():
    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/top_down/resnet/coco/res50_coco_256x192.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'
    # test a single image, with a list of bboxes.
    pose_results = inference_top_down_pose_model(
        pose_model, image_name, [[50, 50, 50, 100]], format='xywh')

    # show the results
    vis_pose_result(pose_model, image_name, pose_results, skeleton=skeleton)
Example #17
 def __init__(self, args, device, skeleton):
     print("Initialize MMPoseDriver - begin.")
     self.det_model = init_detector(args.mmp_det_config,
                                    args.mmp_det_checkpoint,
                                    device=device.name)
     self.pose_model = init_pose_model(args.mmp_pose_config,
                                       args.mmp_pose_checkpoint,
                                       device=device.name)
     self.dataset = self.pose_model.cfg.data['test']['type']
     self.bbox_thr = args.mmp_bbox_thr
     self.kpt_thr = args.mmp_kpt_thr
     self.return_heatmap = False
     self.output_layer_names = None
     self.last_pose_results = None
     self.last_returned_outputs = None
     self.last_converted_results = None
     self.last_raw_results = None
     self.last_scores = None
     self.coco_to_sem = [
         [11, 12],  # sem 00: 'Hip'
         [12],  # sem 01: 'RHip' 
         [14],  # sem 02: 'RKnee' 
         [16],  # sem 03: 'RFoot' 
         [11],  # sem 04: 'LHip' 
         [13],  # sem 05: 'LKnee' 
         [15],  # sem 06: 'LFoot' 
         [5, 6, 11, 12],  # sem 07: 'Spine' 
         [5, 6],  # sem 08: 'Thorax' 
         [0],  # sem 09: 'Head' 
         [5],  # sem 10: 'LShoulder' 
         [7],  # sem 11: 'LElbow' 
         [9],  # sem 12: 'LWrist' 
         [6],  # sem 13: 'RShoulder' 
         [8],  # sem 14: 'RElbow' 
         [10]  # sem 15: 'RWrist' 
     ]
     self.device = device
     self.render_mmp = args.mmp_show_mmp
     self.render_2d = args.mmp_show_2d
     self.skeleton = skeleton
     self.render_score_threshold = args.render_score_threshold
     self.privacy = args.privacy
     print("Initialize MMPoseDriver - end.")
Example #18
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)

    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    model = init_pose_model(config=cfg,
                            checkpoint=args.checkpoint,
                            device=device)
    img_path = args.img_path

    if not os.path.isfile(img_path):
        raise Exception('--img-path value is not a valid file path')
    elif img_path.split('.')[-1].lower() not in VALID_IMG_TYPES:
        raise Exception(
            f'--img-path value is not a valid file type. \n'
            f'Valid file types are {VALID_IMG_TYPES}')

    output = inference_top_down_pose_model(model, img_path)
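A hedged note on the final call, assuming the MMPose 0.x API: with person_results omitted, the whole image is treated as a single bounding box, and the call returns a (pose_results, returned_outputs) tuple:

# Sketch of unpacking the result under that assumption.
pose_results, _ = output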
Example #19
def test_hand_gesture_demo():

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/hand/gesture_sview_rgbd_vid/mtut/nvgesture/'
        'i3d_nvgesture_bbox_112x112_fps15.py',
        None,
        device='cpu')

    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    video_files = [
        'tests/data/nvgesture/sk_color.avi',
        'tests/data/nvgesture/sk_depth.avi'
    ]
    with open('tests/data/nvgesture/bboxes.json', 'r') as f:
        bbox = next(iter(json.load(f).values()))

    pred_label, _ = inference_gesture_model(pose_model, video_files, bbox,
                                            dataset_info)
Example #20
    def __init__(
            self,
            name: str,
            model_config: str,
            model_checkpoint: str,
            input_buffer: str,
            output_buffer: Union[str, List[str]],
            enable_key: Optional[Union[str, int]] = None,
            enable: bool = True,
            device: str = 'cuda:0',
            class_ids: Optional[List[int]] = None,
            labels: Optional[List[str]] = None,
            bbox_thr: float = 0.5,
            smooth: bool = False,
            smooth_filter_cfg: str = 'configs/_base_/filters/one_euro.py'):
        super().__init__(name=name, enable_key=enable_key, enable=enable)

        # Model settings
        self.model_config = get_config_path(model_config, 'mmpose')
        self.model_checkpoint = model_checkpoint
        self.device = device.lower()

        self.class_ids = class_ids
        self.labels = labels
        self.bbox_thr = bbox_thr

        if smooth:
            smooth_filter_cfg = get_config_path(smooth_filter_cfg, 'mmpose')
            self.smoother = Smoother(smooth_filter_cfg, keypoint_dim=2)
        else:
            self.smoother = None
        # Init model
        self.model = init_pose_model(
            self.model_config, self.model_checkpoint, device=self.device)

        # Store history for pose tracking
        self.track_info = TrackInfo()

        # Register buffers
        self.register_input_buffer(input_buffer, 'input', trigger=True)
        self.register_output_buffer(output_buffer)
Example #21
def get_pose(
        img,
        result_path,
        pose_config='./mobilenetv2_coco_512x512.py',
        pose_checkpoint='./mobilenetv2_coco_512x512-4d96e309_20200816.pth',
        device='cpu',
        kpt_thr=0.5):

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(pose_config,
                                 pose_checkpoint,
                                 device=device.lower())
    # optional
    return_heatmap = False
    dataset = pose_model.cfg.data['test']['type']
    assert (dataset == 'BottomUpCocoDataset')

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None
    img = cv2.imread(img)

    pose_results, returned_outputs = inference_bottom_up_pose_model(
        pose_model,
        img,
        return_heatmap=return_heatmap,
        outputs=output_layer_names)
    # show the results
    vis_img = vis_pose_result(pose_model,
                              img,
                              pose_results,
                              dataset=dataset,
                              kpt_score_thr=kpt_thr,
                              show=False)
    cv2.imwrite(result_path, vis_img)

    sample0 = {"url": result_path}

    res_list = [sample0]

    return res_list
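A hypothetical call of get_pose; both file names are placeholders:

# 'person.jpg' and 'person_vis.jpg' are placeholders for illustration.
res = get_pose('person.jpg', 'person_vis.jpg')
print(res)  # -> [{'url': 'person_vis.jpg'}]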
Example #22
def test_bottom_up_pose_tracking_demo():
    # COCO demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/body/2d_kpt_sview_rgb_img/associative_embedding/'
        'coco/res50_coco_512x512.py',
        None,
        device='cpu')

    image_name = 'tests/data/coco/000000000785.jpg'
    dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])

    pose_results, _ = inference_bottom_up_pose_model(pose_model,
                                                     image_name,
                                                     dataset_info=dataset_info)

    pose_results, next_id = get_track_id(pose_results, [], next_id=0)

    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset_info=dataset_info)

    pose_results_last = pose_results

    # oks
    pose_results, next_id = get_track_id(pose_results,
                                         pose_results_last,
                                         next_id=next_id,
                                         use_oks=True)

    pose_results_last = pose_results

    # one_euro (will be deprecated)
    with pytest.deprecated_call():
        pose_results, next_id = get_track_id(pose_results,
                                             pose_results_last,
                                             next_id=next_id,
                                             use_one_euro=True)
Example #23
def test_body_mesh_demo():
    # H36M demo
    config = 'configs/body/3d_mesh_sview_rgb_img/hmr' \
             '/mixed/res50_mixed_224x224.py'
    config = mmcv.Config.fromfile(config)
    config.model.mesh_head.smpl_mean_params = \
        'tests/data/smpl/smpl_mean_params.npz'

    pose_model = None
    with tempfile.TemporaryDirectory() as tmpdir:
        config.model.smpl.smpl_path = tmpdir
        config.model.smpl.joints_regressor = osp.join(
            tmpdir, 'test_joint_regressor.npy')
        # generate weight file for SMPL model.
        generate_smpl_weight_file(tmpdir)
        pose_model = init_pose_model(config, device='cpu')

    assert pose_model is not None, 'Fail to build pose model'

    image_name = 'tests/data/h36m/S1_Directions_1.54138969_000001.jpg'
    det_result = {
        'keypoints': np.zeros((17, 3)),
        'bbox': [50, 50, 50, 50],
        'image_name': image_name,
    }

    # make person bounding boxes
    person_results = [det_result]
    dataset = pose_model.cfg.data['test']['type']

    # test a single image, with a list of bboxes
    pose_results = inference_mesh_model(pose_model,
                                        image_name,
                                        person_results,
                                        bbox_thr=None,
                                        format='xywh',
                                        dataset=dataset)

    vis_3d_mesh_result(pose_model, pose_results, image_name)
Example #24
def main():
    args = parse_args()

    device = torch.device(args.device)

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())
    # optional
    return_heatmap = False
    dataset = pose_model.cfg.data['test']['type']
    assert (dataset == 'BottomUpCocoDataset')

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None
    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        # ret_val, img = camera.read()
        img = cv2.imread(args.img_root)

        pose_results, returned_outputs = inference_bottom_up_pose_model(
            pose_model,
            img,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)
        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)
        cv2.imshow('Image', vis_img)
        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break
Example #25
def main():
    """Visualize the demo images.

    Require the json_file containing boxes.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for detection')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--json-file',
                        type=str,
                        default='',
                        help='Json file containing image info.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show img')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output img file. '
                        'Default not saving the visualization images.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=None,
                        help='Bounding box score threshold '
                        '(None keeps all annotated boxes)')

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')

    coco = COCO(args.json_file)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    dataset = pose_model.cfg.data['test']['type']

    img_keys = list(coco.imgs.keys())

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    # process each image
    for i in range(len(img_keys)):
        # get bounding box annotations
        image_id = img_keys[i]
        image = coco.loadImgs(image_id)[0]
        image_name = os.path.join(args.img_root, image['file_name'])
        ann_ids = coco.getAnnIds(image_id)

        # make person bounding boxes
        person_bboxes = []
        for ann_id in ann_ids:
            ann = coco.anns[ann_id]
            # bbox format is 'xywh'
            bbox = ann['bbox']
            person_bboxes.append(bbox)

        # test a single image, with a list of bboxes
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            image_name,
            person_bboxes,
            bbox_thr=args.bbox_thr,
            format='xywh',
            dataset=dataset,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        vis_pose_result(pose_model,
                        image_name,
                        pose_results,
                        dataset=dataset,
                        kpt_score_thr=args.kpt_thr,
                        show=args.show,
                        out_file=out_file)
Example #26
def test_pose_tracking_demo():
    # COCO demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/top_down/resnet/coco/res50_coco_256x192.py',
        None,
        device='cpu')
    image_name = 'tests/data/coco/000000000785.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(pose_model,
                                                    image_name,
                                                    [[50, 50, 50, 100]],
                                                    format='xywh')
    pose_results, next_id = get_track_id(pose_results, [], next_id=0)
    # show the results
    vis_pose_tracking_result(pose_model, image_name, pose_results)
    pose_results_last = pose_results

    # AIC demo
    pose_model = init_pose_model(
        'configs/top_down/resnet/aic/res50_aic_256x192.py', None, device='cpu')
    image_name = 'tests/data/aic/054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name, [[50, 50, 50, 100]],
        format='xywh',
        dataset='TopDownAicDataset')
    pose_results, next_id = get_track_id(pose_results, pose_results_last,
                                         next_id)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset='TopDownAicDataset')

    # OneHand10K demo
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        'configs/hand/resnet/onehand10k/res50_onehand10k_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/onehand10k/9.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name, [[10, 10, 30, 30]],
        format='xywh',
        dataset='OneHand10KDataset')
    pose_results, next_id = get_track_id(pose_results, pose_results_last,
                                         next_id)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset='OneHand10KDataset')

    # InterHand2D demo
    pose_model = init_pose_model(
        'configs/hand/resnet/interhand2d/res50_interhand2d_all_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/interhand2d/image2017.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name, [[50, 50, 0, 0]],
        format='xywh',
        dataset='InterHand2DDataset')
    pose_results, next_id = get_track_id(pose_results, [], next_id=0)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset='InterHand2DDataset')
    pose_results_last = pose_results

    # MPII demo
    pose_model = init_pose_model(
        'configs/top_down/resnet/mpii/res50_mpii_256x256.py',
        None,
        device='cpu')
    image_name = 'tests/data/mpii/004645041.jpg'
    # test a single image, with a list of bboxes.
    pose_results, _ = inference_top_down_pose_model(
        pose_model,
        image_name, [[50, 50, 0, 0]],
        format='xywh',
        dataset='TopDownMpiiDataset')
    pose_results, next_id = get_track_id(pose_results, pose_results_last,
                                         next_id)
    # show the results
    vis_pose_tracking_result(pose_model,
                             image_name,
                             pose_results,
                             dataset='TopDownMpiiDataset')

    with pytest.raises(NotImplementedError):
        vis_pose_tracking_result(pose_model,
                                 image_name,
                                 pose_results,
                                 dataset='test')
Example #27
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    assert has_face_det, 'Please install face_recognition to run the demo. '\
                         '"pip install face_recognition", For more details, '\
                         'see https://github.com/ageitgey/face_recognition'

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
    else:
        dataset_info = DatasetInfo(dataset_info)

    cap = cv2.VideoCapture(args.video_path)
    assert cap.isOpened(), f'Failed to load video file {args.video_path}'

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    while (cap.isOpened()):
        flag, img = cap.read()
        if not flag:
            break

        face_det_results = face_recognition.face_locations(
            cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        face_results = process_face_det_results(face_det_results)

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            img,
            face_results,
            bbox_thr=None,
            format='xyxy',
            dataset=dataset,
            dataset_info=dataset_info,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  radius=args.radius,
                                  thickness=args.thickness,
                                  dataset=dataset,
                                  dataset_info=dataset_info,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if args.show and cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    if args.show:
        cv2.destroyAllWindows()
Example #28
def main():
    """Visualize the demo images."""
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--pose-nms-thr',
                        type=float,
                        default=0.9,
                        help='OKS threshold for pose NMS')
    parser.add_argument('--radius',
                        type=int,
                        default=4,
                        help='Keypoint radius for visualization')
    parser.add_argument('--thickness',
                        type=int,
                        default=1,
                        help='Link thickness for visualization')

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
        assert (dataset == 'BottomUpCocoDataset')
    else:
        dataset_info = DatasetInfo(dataset_info)

    # read video
    video = mmcv.VideoReader(args.video_path)
    assert video.opened, f'Failed to load video file {args.video_path}'

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = video.fps
        size = (video.width, video.height)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    print('Running inference...')
    for _, cur_frame in enumerate(mmcv.track_iter_progress(video)):
        pose_results, _ = inference_bottom_up_pose_model(
            pose_model,
            cur_frame,
            dataset=dataset,
            dataset_info=dataset_info,
            pose_nms_thr=args.pose_nms_thr,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # show the results
        vis_frame = vis_pose_result(pose_model,
                                    cur_frame,
                                    pose_results,
                                    radius=args.radius,
                                    thickness=args.thickness,
                                    dataset=dataset,
                                    dataset_info=dataset_info,
                                    kpt_score_thr=args.kpt_thr,
                                    show=False)

        if args.show:
            cv2.imshow('Image', vis_frame)

        if save_out_video:
            videoWriter.write(vis_frame)

        if args.show and cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if save_out_video:
        videoWriter.release()
    if args.show:
        cv2.destroyAllWindows()
Example #29
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    parser.add_argument('--iou-thr',
                        type=float,
                        default=0.3,
                        help='IoU score threshold')

    assert has_mmdet, 'Please install mmdet to run the demo.'

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device.lower())
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']

    cap = cv2.VideoCapture(args.video_path)

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    next_id = 0
    pose_results = []
    while (cap.isOpened()):
        pose_results_last = pose_results

        flag, img = cap.read()
        if not flag:
            break
        # test a single image, the resulting box is (x1, y1, x2, y2)
        mmdet_results = inference_detector(det_model, img)

        # keep the person class bounding boxes.
        person_results = process_mmdet_results(mmdet_results)

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            img,
            person_results,
            bbox_thr=args.bbox_thr,
            format='xyxy',
            dataset=dataset,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # get track id for each person instance
        pose_results, next_id = get_track_id(pose_results,
                                             pose_results_last,
                                             next_id,
                                             iou_thr=args.iou_thr)

        # show the results
        vis_img = vis_pose_tracking_result(pose_model,
                                           img,
                                           pose_results,
                                           dataset=dataset,
                                           kpt_score_thr=args.kpt_thr,
                                           show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()
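A sketch of invoking the script above; the script and file names are placeholders, while the positional and optional arguments come from the parser definitions:

# python demo_with_mmdet.py det_cfg.py det_ckpt.pth pose_cfg.py pose_ckpt.pth \
#     --video-path input.mp4 --out-video-root vis_results --device cuda:0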
Example #30
def main():
    """Visualize the demo images.

    Using mmdet to detect the human.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'Default not saving the visualization video.')
    parser.add_argument('--device',
                        default='cpu',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device)
    print('loaded detection model')
    # build the pose model from a config file and a checkpoint file
    print('pose config: {0} \npose checkpoint: {1}'.format(
        args.pose_config, args.pose_checkpoint))
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)
    print('loaded pose model')

    dataset = pose_model.cfg.data['test']['type']

    print(dataset)

    cap = cv2.VideoCapture(args.video_path)

    print('loaded video')

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True
        print('save path: {0}'.format(args.out_video_root))

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    count = 0
    t0 = time.perf_counter()
    while (cap.isOpened()):
        t1 = time.perf_counter()
        flag, img = cap.read()

        if not flag:
            break
        # test a single image, the resulting box is (x1, y1, x2, y2)
        det_results = inference_detector(det_model, img)
        # keep the person class bounding boxes.
        person_bboxes = det_results[0].copy()

        # test a single image, with a list of bboxes.
        pose_results = inference_top_down_pose_model(pose_model,
                                                     img,
                                                     person_bboxes,
                                                     bbox_thr=args.bbox_thr,
                                                     format='xyxy',
                                                     dataset=dataset)

        count += 1
        t = time.perf_counter()
        print('Frame {0} analysed in {1} secs. Total time: {2} secs'.format(
            count, t - t1, t - t0))

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  dataset=dataset,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show or count == 3:  # NOTE: frame 3 is shown even without --show
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        videoWriter.release()
    cv2.destroyAllWindows()