コード例 #1
0
ファイル: webcam_demo.py プロジェクト: wusize/mmpose
def inference_pose():
    """Worker loop: consume detection results and run pose estimation.

    Pops ``(timestamp, frame, timing info, detections)`` tuples from
    ``det_result_queue``, runs every configured pose model on the frame,
    propagates instance track ids across frames via each model's history
    dict, and pushes the aggregated results onto ``pose_result_queue``.
    Signals ``event_inference_done`` after every processed frame.
    """
    print('Thread "pose" started')
    timer = StopWatch(window=10)

    while True:
        # spin (with a short sleep) until the detector thread has output
        while not det_result_queue:
            time.sleep(0.001)
        with det_result_queue_mutex:
            ts_input, frame, t_info, mmdet_results = \
                det_result_queue.popleft()

        results_per_model = []
        for info, history in zip(pose_model_list, pose_history_list):
            model = info['model']

            with timer.timeit(info['name']):
                # keep only detections belonging to this model's classes
                det_results = process_mmdet_results(
                    mmdet_results,
                    class_names=det_model.CLASSES,
                    cat_ids=info['cat_ids'])

                # run top-down pose inference on the selected boxes
                dataset_name = model.cfg.data['test']['type']
                pose_results, _ = inference_top_down_pose_model(
                    model,
                    frame,
                    det_results,
                    bbox_thr=args.det_score_thr,
                    format='xyxy',
                    dataset=dataset_name)

                # assign/propagate track ids using last frame's results
                pose_results, next_id = get_track_id(
                    pose_results,
                    history['pose_results_last'],
                    history['next_id'],
                    use_oks=False,
                    tracking_thr=0.3,
                    use_one_euro=True,
                    fps=None)

                results_per_model.append(pose_results)

                # remember this frame's results for tracking the next one
                history['pose_results_last'] = pose_results
                history['next_id'] = next_id

        t_info += timer.report_strings()
        with pose_result_queue_mutex:
            pose_result_queue.append((ts_input, t_info, results_per_model))

        event_inference_done.set()
コード例 #2
0
def test_stopwatch():
    """Smoke-test StopWatch: nested timeit contexts plus both report APIs."""
    outer_time = 100  # outer sleep per loop, in milliseconds
    inner_time = 100  # inner (nested) sleep per loop, in milliseconds

    watch = StopWatch(window=5)
    # each of the 10 iterations times an outer span that fully
    # contains a nested, named inner span
    for _ in range(10):
        with watch.timeit():
            time.sleep(outer_time / 1000.)
            with watch.timeit('inner'):
                time.sleep(inner_time / 1000.)

    # exercise both reporting entry points; values are discarded
    _ = watch.report()
    _ = watch.report_strings()
コード例 #3
0
ファイル: test_utils.py プロジェクト: wusize/mmpose
def test_stopwatch():
    """Check StopWatch timing accuracy with nested named/anonymous timers."""
    n_loops = 10
    outer_ms = 100  # outer sleep per loop, in milliseconds
    inner_ms = 100  # inner (nested) sleep per loop, in milliseconds

    watch = StopWatch(window=5)
    for _ in range(n_loops):
        with watch.timeit():
            time.sleep(outer_ms / 1000.)
            with watch.timeit('inner'):
                time.sleep(inner_ms / 1000.)

    report = watch.report()
    _ = watch.report_strings()

    # NOTE(review): the anonymous outer timer is reported under '_FPS_';
    # it appears to hold the mean loop duration in ms (outer + inner sleep)
    np.testing.assert_allclose(report['_FPS_'],
                               outer_ms + inner_ms,
                               rtol=0.01)

    np.testing.assert_allclose(report['inner'], inner_ms, rtol=0.01)
コード例 #4
0
ファイル: webcam_demo.py プロジェクト: wusize/mmpose
def inference_detection():
    """Worker loop: read frames from the input queue and run detection.

    Each frame popped from ``input_queue`` is passed through the detector;
    the result (with timing strings) is forwarded to ``det_result_queue``.
    The loop is throttled so that at most ``args.inference_fps``
    inferences run per second.
    """
    print('Thread "det" started')
    timer = StopWatch(window=10)
    min_interval = 1.0 / args.inference_fps
    ts_prev = None  # wall-clock time of the previous inference, if any

    while True:
        # spin (with a short sleep) until a frame is available
        while not input_queue:
            time.sleep(0.001)
        with input_queue_mutex:
            ts_input, frame = input_queue.popleft()

        # run the detector, timed under the 'Det' label
        with timer.timeit('Det'):
            mmdet_results = inference_detector(det_model, frame)

        t_info = timer.report_strings()
        with det_result_queue_mutex:
            det_result_queue.append((ts_input, frame, t_info, mmdet_results))

        # throttle: sleep off any remainder of the minimum interval
        now = time.time()
        if ts_prev is not None:
            remaining = min_interval - (now - ts_prev)
            if remaining > 0:
                time.sleep(remaining)
        ts_prev = time.time()
コード例 #5
0
ファイル: webcam_demo.py プロジェクト: wusize/mmpose
def _eye_keypoint_indices(dataset_name, effect_name):
    """Return ``(left_eye_idx, right_eye_idx)`` for a supported dataset.

    Args:
        dataset_name (str): pose dataset type name from the model config.
        effect_name (str): human-readable effect name, used in the error
            message.

    Raises:
        ValueError: if the dataset has no known eye keypoint layout.
    """
    if dataset_name == 'TopDownCocoDataset':
        return 1, 2
    if dataset_name == 'AnimalPoseDataset':
        return 0, 1
    # fixed: original message lacked a space before the dataset name
    raise ValueError(f'{effect_name} does not support {dataset_name}')


def display():
    """Main display loop: draw results, apply effects, and handle keys.

    Consumes frames from ``frame_buffer`` and the latest pose results from
    ``pose_result_queue``, renders pose visualizations and the optional
    sunglasses / bug-eye effects, overlays timing and system information,
    optionally writes an output video, and reacts to keyboard shortcuts.
    Exits when a ``None`` timestamp (the input-ending signal) is received
    or on 'q'/'Q'/Esc, then sets ``event_exit``.
    """
    print('Thread "display" started')
    stop_watch = StopWatch(window=10)

    # initialize result status
    ts_inference = None  # timestamp of the latest inference result
    fps_inference = 0.  # inference FPS
    t_delay_inference = 0.  # inference result time delay
    pose_results_list = None  # latest inference result
    t_info = []  # upstream time information (list[str])

    # initialize visualization and output
    sunglasses_img = None  # resource image for sunglasses effect
    text_color = (228, 183, 61)  # text color to show time/system information
    vid_out = None  # video writer, created lazily on the first frame

    # show instructions
    print('Keyboard shortcuts: ')
    print('"v": Toggle the visualization of bounding boxes and poses.')
    print('"s": Toggle the sunglasses effect.')
    print('"b": Toggle the bug-eye effect.')
    print('"Q", "q" or Esc: Exit.')

    while True:
        with stop_watch.timeit('_FPS_'):
            # acquire a frame from buffer
            ts_input, frame = frame_buffer.get()
            # input ending signal
            if ts_input is None:
                break

            img = frame

            # get pose estimation results (non-blocking: keep showing the
            # last known results if no new ones have arrived yet)
            if len(pose_result_queue) > 0:
                with pose_result_queue_mutex:
                    _result = pose_result_queue.popleft()
                    _ts_input, t_info, pose_results_list = _result

                _ts = time.time()
                if ts_inference is not None:
                    fps_inference = 1.0 / (_ts - ts_inference)
                ts_inference = _ts
                t_delay_inference = (_ts - _ts_input) * 1000

            # visualize detection and pose results
            if pose_results_list is not None:
                for model_info, pose_results in zip(pose_model_list,
                                                    pose_results_list):
                    pose_model = model_info['model']
                    bbox_color = model_info['bbox_color']

                    dataset_name = pose_model.cfg.data['test']['type']

                    # show pose results
                    if args.show_pose:
                        img = vis_pose_result(pose_model,
                                              img,
                                              pose_results,
                                              radius=4,
                                              thickness=2,
                                              dataset=dataset_name,
                                              kpt_score_thr=args.kpt_thr,
                                              bbox_color=bbox_color)

                    # sunglasses effect
                    if args.sunglasses:
                        left_eye_idx, right_eye_idx = _eye_keypoint_indices(
                            dataset_name, 'Sunglasses effect')
                        if sunglasses_img is None:
                            # The image attributes to:
                            # https://www.vecteezy.com/free-vector/glass
                            # Glass Vectors by Vecteezy
                            sunglasses_img = cv2.imread(
                                'demo/resources/sunglasses.jpg')
                        img = apply_sunglasses_effect(img, pose_results,
                                                      sunglasses_img,
                                                      left_eye_idx,
                                                      right_eye_idx)
                    # bug-eye effect
                    if args.bugeye:
                        left_eye_idx, right_eye_idx = _eye_keypoint_indices(
                            dataset_name, 'Bug-eye effect')
                        img = apply_bugeye_effect(img, pose_results,
                                                  left_eye_idx, right_eye_idx)

            # delay control: optionally hold the frame back so the display
            # lags the capture by at least args.display_delay milliseconds
            if args.display_delay > 0:
                t_sleep = args.display_delay * 0.001 - (time.time() - ts_input)
                if t_sleep > 0:
                    time.sleep(t_sleep)
            t_delay = (time.time() - ts_input) * 1000

            # show time information
            t_info_display = stop_watch.report_strings()  # display fps
            t_info_display.append(f'Inference FPS: {fps_inference:>5.1f}')
            t_info_display.append(f'Delay: {t_delay:>3.0f}')
            t_info_display.append(
                f'Inference Delay: {t_delay_inference:>3.0f}')
            t_info_str = ' | '.join(t_info_display + t_info)
            cv2.putText(img, t_info_str, (20, 20), cv2.FONT_HERSHEY_DUPLEX,
                        0.3, text_color, 1)
            # collect system information
            sys_info = [
                f'RES: {img.shape[1]}x{img.shape[0]}',
                f'Buffer: {frame_buffer.qsize()}/{frame_buffer.maxsize}'
            ]
            if psutil_proc is not None:
                sys_info += [
                    f'CPU: {psutil_proc.cpu_percent():.1f}%',
                    f'MEM: {psutil_proc.memory_percent():.1f}%'
                ]
            sys_info_str = ' | '.join(sys_info)
            cv2.putText(img, sys_info_str, (20, 40), cv2.FONT_HERSHEY_DUPLEX,
                        0.3, text_color, 1)

            # save the output video frame
            if args.out_video_file is not None:
                if vid_out is None:
                    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                    fps = args.out_video_fps
                    frame_size = (img.shape[1], img.shape[0])
                    vid_out = cv2.VideoWriter(args.out_video_file, fourcc, fps,
                                              frame_size)

                vid_out.write(img)

            # display and poll the keyboard for effect toggles / exit
            cv2.imshow('mmpose webcam demo', img)
            keyboard_input = cv2.waitKey(1)
            if keyboard_input in (27, ord('q'), ord('Q')):
                break
            elif keyboard_input == ord('s'):
                args.sunglasses = not args.sunglasses
            elif keyboard_input == ord('b'):
                args.bugeye = not args.bugeye
            elif keyboard_input == ord('v'):
                args.show_pose = not args.show_pose

    cv2.destroyAllWindows()
    if vid_out is not None:
        vid_out.release()
    event_exit.set()