def main():
    """Visualize the demo images.

    Using mmdet to detect humans.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--img', type=str, default='', help='Image file')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show the image')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output image files. '
                        'By default, the visualization images are not saved.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
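
    # The pairs above are 1-based indices into COCO's 17 keypoints. The
    # names below follow the standard COCO ordering (an assumption, not
    # taken from this script); uncomment to print each limb:
    # coco_keypoints = [
    #     'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',
    #     'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
    #     'left_wrist', 'right_wrist', 'left_hip', 'right_hip',
    #     'left_knee', 'right_knee', 'left_ankle', 'right_ankle']
    # for a, b in skeleton:
    #     print(coco_keypoints[a - 1], '<->', coco_keypoints[b - 1])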

    assert args.show or (args.out_img_root != '')
    assert args.img != ''
    assert 'cuda' in args.device
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    image_name = os.path.join(args.img_root, args.img)

    # test a single image, the resulting box is (x1, y1, x2, y2)
    det_results = inference_detector(det_model, image_name)

    # keep the person-class bounding boxes (Faster R-CNN output)
    person_bboxes = det_results[0].copy()
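    # With the older mmdet API assumed here, each entry of det_results is
    # an (N, 5) float array of [x1, y1, x2, y2, score]; the bbox_thr passed
    # below drops low-confidence rows, equivalent to:
    # person_bboxes = person_bboxes[person_bboxes[:, 4] > args.bbox_thr]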

    # test a single image, with a list of bboxes.
    pose_results = inference_pose_model(pose_model,
                                        image_name,
                                        person_bboxes,
                                        bbox_thr=args.bbox_thr,
                                        format='xyxy')

    if args.out_img_root == '':
        out_file = None
    else:
        os.makedirs(args.out_img_root, exist_ok=True)
        out_file = os.path.join(args.out_img_root, f'vis_{args.img}')

    # show the results
    vis_pose_result(pose_model,
                    image_name,
                    pose_results,
                    skeleton=skeleton,
                    kpt_score_thr=args.kpt_thr,
                    show=args.show,
                    out_file=out_file)
Example #2
def main():
    """Visualize the demo images.

    Using mmdet to detect humans.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show visualizations.')
    parser.add_argument('--out-video-root',
                        default='',
                        help='Root of the output video file. '
                        'By default, the visualization video is not saved.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--bbox-thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    assert args.show or (args.out_video_root != '')
    assert 'cuda' in args.device
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    cap = cv2.VideoCapture(args.video_path)

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
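        # cv2.CAP_PROP_FPS can report 0 for sources without frame-rate
        # metadata; a defensive fallback (an assumption, not in the
        # original) would be: fps = fps if fps > 0 else 30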
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_writer = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    while cap.isOpened():
        flag, img = cap.read()
        if not flag:
            break
        # test a single image, the resulting box is (x1, y1, x2, y2)
        det_results = inference_detector(det_model, img)
        # keep the person class bounding boxes.
        person_bboxes = det_results[0].copy()

        # test a single image, with a list of bboxes.
        pose_results = inference_pose_model(pose_model,
                                            img,
                                            person_bboxes,
                                            bbox_thr=args.bbox_thr,
                                            format='xyxy')

        # show the results
        vis_img = vis_pose_result(pose_model,
                                  img,
                                  pose_results,
                                  skeleton=skeleton,
                                  kpt_score_thr=args.kpt_thr,
                                  show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            video_writer.write(vis_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if save_out_video:
        video_writer.release()
    cv2.destroyAllWindows()
Example #3
# In[4]:

import pickle

from tqdm import tqdm

# detection results saved earlier; `paths` and `pose_model` are assumed to
# be defined in earlier notebook cells
result = pickle.load(open("../data/results_bbox_test.pkl", 'rb'))
len(result)

# In[5]:

final_result = []
for idx, (img, r) in tqdm(enumerate(zip(paths, result))):

    # the last entry of r holds the person boxes, e.g.
    # [[128.54112, 64.24658, 143.41219, 156.22845, 0.98622096]]
    person_bboxes = r[-1]

    pose_results = inference_pose_model(pose_model,
                                        img,
                                        person_bboxes,
                                        format='xyxy')

    final_result.append(pose_results)

#     palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102],
#                         [230, 230, 0], [255, 153, 255], [153, 204, 255],
#                         [255, 102, 255], [255, 51, 255], [102, 178, 255],
#                         [51, 153, 255], [255, 153, 153], [255, 102, 102],
#                         [255, 51, 51], [153, 255, 153], [102, 255, 102],
#                         [51, 255, 51], [0, 255, 0]])

#     pose_limb_color = palette[[
#         0, 0, 0, 0, 7, 7, 7, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 16
#     ]]
#     pose_kpt_color = palette[[
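
# A minimal sketch for persisting the collected poses once the loop above
# finishes (the output path is an assumption, mirroring the input naming):
# with open("../data/results_pose_test.pkl", 'wb') as f:
#     pickle.dump(final_result, f)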
Example #4

def main():
    """Visualize the demo images.

    Using mmdet to detect humans.
    """
    parser = ArgumentParser()
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--img_prefix',
                        type=str,
                        default='',
                        help='Image prefix')
    parser.add_argument('--img', type=str, default='', help='Image file')

    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--bbox_thr',
                        type=float,
                        default=0.3,
                        help='Bounding box score threshold')
    parser.add_argument('--kpt_thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    args = parser.parse_args()

    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    assert args.img != ''
    assert args.det_config is not None
    assert args.det_checkpoint is not None

    det_model = init_detector(args.det_config,
                              args.det_checkpoint,
                              device=args.device)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    image_name = os.path.join(args.img_prefix, args.img)

    # test a single image, the resulting box is (x1, y1, x2, y2)
    det_results = inference_detector(det_model, image_name)

    # keep the person class bounding boxes.
    person_bboxes = det_results[0][0].copy()
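    # Note the extra [0] compared with the image demo above: det_results is
    # assumed here to be a (bbox_results, segm_results) tuple, as returned
    # by mask-based detectors in mmdet, so [0][0] selects the person-class
    # boxes.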

    # test a single image, with a list of bboxes.
    pose_results = inference_pose_model(pose_model,
                                        image_name,
                                        person_bboxes,
                                        bbox_thr=args.bbox_thr,
                                        format='xyxy')

    # show the results
    show_pose_result(pose_model,
                     image_name,
                     pose_results,
                     skeleton=skeleton,
                     kpt_score_thr=args.kpt_thr)
Example #5
def main():
    """Visualize the demo images."""
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img_prefix',
                        type=str,
                        default='',
                        help='Image prefix')
    parser.add_argument('--json_file',
                        type=str,
                        default='',
                        help='JSON file containing image info.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt_thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')
    args = parser.parse_args()

    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    from pycocotools.coco import COCO
    coco = COCO(args.json_file)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    img_keys = list(coco.imgs.keys())

    # process each image
    for i in tqdm(range(len(img_keys))):
        # get bounding box annotations
        image_id = img_keys[i]
        image = coco.loadImgs(image_id)[0]
        image_name = os.path.join(args.img_prefix, image['file_name'])
        ann_ids = coco.getAnnIds(image_id)

        # make person bounding boxes
        person_bboxes = []
        for ann_id in ann_ids:
            ann = coco.anns[ann_id]
            # bbox format is 'xywh'
            bbox = ann['bbox']
            person_bboxes.append(bbox)
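        # getAnnIds(image_id) returns annotations of every category; if the
        # json mixes categories, one could keep only persons (assumed COCO
        # category name) with:
        # ann_ids = coco.getAnnIds(imgIds=image_id,
        #                          catIds=coco.getCatIds(catNms=['person']))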

        # test a single image, with a list of bboxes.
        pose_results = inference_pose_model(pose_model,
                                            image_name,
                                            person_bboxes,
                                            format='xywh')

        # show the results
        show_pose_result(pose_model,
                         image_name,
                         pose_results,
                         skeleton=skeleton,
                         kpt_score_thr=args.kpt_thr)
Example #6
def main():
    """Visualize the demo images.

    Require the json_file containing boxes.
    """
    parser = ArgumentParser()
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file')
    parser.add_argument('--img-root', type=str, default='', help='Image root')
    parser.add_argument('--json-file',
                        type=str,
                        default='',
                        help='JSON file containing image info.')
    parser.add_argument('--show',
                        action='store_true',
                        default=False,
                        help='whether to show the image')
    parser.add_argument('--out-img-root',
                        type=str,
                        default='',
                        help='Root of the output image files. '
                        'By default, the visualization images are not saved.')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--kpt-thr',
                        type=float,
                        default=0.3,
                        help='Keypoint score threshold')

    args = parser.parse_args()

    assert args.show or (args.out_img_root != '')
    assert 'cuda' in args.device

    skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
                [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
                [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]

    from pycocotools.coco import COCO
    coco = COCO(args.json_file)
    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(args.pose_config,
                                 args.pose_checkpoint,
                                 device=args.device)

    img_keys = list(coco.imgs.keys())

    # process each image
    for i in range(len(img_keys)):
        # get bounding box annotations
        image_id = img_keys[i]
        image = coco.loadImgs(image_id)[0]
        image_name = os.path.join(args.img_root, image['file_name'])
        ann_ids = coco.getAnnIds(image_id)

        # make person bounding boxes
        person_bboxes = []
        for ann_id in ann_ids:
            ann = coco.anns[ann_id]
            # bbox format is 'xywh'
            bbox = ann['bbox']
            person_bboxes.append(bbox)
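        # COCO 'xywh' boxes are [x_top_left, y_top_left, width, height];
        # passing format='xywh' lets the pose API convert internally, which
        # is equivalent to [x, y, x + w, y + h] in 'xyxy' terms.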

        # test a single image, with a list of bboxes.
        pose_results = inference_pose_model(pose_model,
                                            image_name,
                                            person_bboxes,
                                            format='xywh')

        if args.out_img_root == '':
            out_file = None
        else:
            os.makedirs(args.out_img_root, exist_ok=True)
            out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')

        # show the results
        vis_pose_result(pose_model,
                        image_name,
                        pose_results,
                        skeleton=skeleton,
                        kpt_score_thr=args.kpt_thr,
                        show=args.show,
                        out_file=out_file)