Example #1
# NOTE: a sketch of the imports this snippet needs, assuming the Open Model
# Zoo demo layout; module paths may differ between releases. build_argparser,
# ARCHITECTURES, draw_poses and print_raw_results are defined elsewhere in
# the demo and are not reproduced here.
import logging as log
from time import perf_counter

import cv2

from model_api.adapters import create_core, OpenvinoAdapter
from model_api.models import ImageModel, OutputTransform
from model_api.performance_metrics import PerformanceMetrics
from model_api.pipelines import get_user_config, AsyncPipeline

import monitors
from helpers import log_latency_per_stage
from images_capture import open_images_capture


def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)
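    # Frame 0 is submitted before the main loop, so submission ids start at 1;
    # results are fetched back in display order starting from id 0.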
    next_frame_id = 1
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
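    # Created unopened; it is opened further down only when --output is given.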
    video_writer = cv2.VideoWriter()

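    # Configure the device plugin (streams/threads) and wrap the model so the
    # Model API can drive inference through OpenVINO.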
    plugin_config = get_user_config(args.device, args.num_streams,
                                    args.num_threads)
    model_adapter = OpenvinoAdapter(
        create_core(),
        args.model,
        device=args.device,
        plugin_config=plugin_config,
        max_num_requests=args.num_infer_requests,
        model_parameters={'input_layouts': args.layout})

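    # Read the first frame eagerly: its aspect ratio seeds the model config below.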
    start_time = perf_counter()
    frame = cap.read()
    if frame is None:
        raise RuntimeError("Can't read an image from the input")

    config = {
        'target_size': args.tsize,
        'aspect_ratio': frame.shape[1] / frame.shape[0],
        'confidence_threshold': args.prob_threshold,
        # 'padding_mode' and 'delta' are only used by the 'higherhrnet' and
        # 'ae' architectures
        'padding_mode': 'center' if args.architecture_type == 'higherhrnet' else None,
        'delta': 0.5 if args.architecture_type == 'higherhrnet' else None,
    }
    model = ImageModel.create_model(ARCHITECTURES[args.architecture_type],
                                    model_adapter, config)
    model.log_layers_info()

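    # The pipeline runs preprocessing, inference and postprocessing asynchronously
    # over several infer requests; submit the first frame right away.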
    hpe_pipeline = AsyncPipeline(model)
    hpe_pipeline.submit_data(frame, 0, {
        'frame': frame,
        'start_time': start_time
    })

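    # Derive the output resolution (possibly rescaled), the utilization-monitors
    # overlay and, if requested, the video writer for saving results.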
    output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
    if args.output_resolution:
        output_resolution = output_transform.new_resolution
    else:
        output_resolution = (frame.shape[1], frame.shape[0])
    presenter = monitors.Presenter(
        args.utilization_monitors, 55,
        (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
    if args.output and not video_writer.open(args.output,
                                             cv2.VideoWriter_fourcc(*'MJPG'),
                                             cap.fps(), output_resolution):
        raise RuntimeError("Can't open video writer")

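    # Main loop: drain completed results in display order, and submit new frames
    # whenever a free infer request is available.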
    while True:
        if hpe_pipeline.callback_exceptions:
            raise hpe_pipeline.callback_exceptions[0]
        # Process all completed requests
        results = hpe_pipeline.get_result(next_frame_id_to_show)
        if results:
            (poses, scores), frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if len(poses) and args.raw_output_message:
                print_raw_results(poses, scores, next_frame_id_to_show)

            presenter.drawGraphs(frame)
            rendering_start_time = perf_counter()
            frame = draw_poses(frame, poses, args.prob_threshold,
                               output_transform)
            render_metrics.update(rendering_start_time)
            metrics.update(start_time, frame)
            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1
            if not args.no_show:
                cv2.imshow('Pose estimation results', frame)
                key = cv2.waitKey(1)

                ESC_KEY = 27
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            continue

        if hpe_pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                break

            # Submit for inference
            hpe_pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1

        else:
            # Wait for empty request
            hpe_pipeline.await_any()

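    # Input exhausted: wait for all in-flight requests, then drain the
    # remaining results in order.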
    hpe_pipeline.await_all()
    if hpe_pipeline.callback_exceptions:
        raise hpe_pipeline.callback_exceptions[0]
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = hpe_pipeline.get_result(next_frame_id_to_show)
        (poses, scores), frame_meta = results
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        if len(poses) and args.raw_output_message:
            print_raw_results(poses, scores, next_frame_id_to_show)

        presenter.drawGraphs(frame)
        rendering_start_time = perf_counter()
        frame = draw_poses(frame, poses, args.prob_threshold, output_transform)
        render_metrics.update(rendering_start_time)
        metrics.update(start_time, frame)
        if video_writer.isOpened() and (
                args.output_limit <= 0
                or next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(frame)
        if not args.no_show:
            cv2.imshow('Pose estimation results', frame)
            key = cv2.waitKey(1)

            ESC_KEY = 27
            # Quit.
            if key in {ord('q'), ord('Q'), ESC_KEY}:
                break
            presenter.handleKey(key)

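    # Report end-to-end metrics and the per-stage latency breakdown.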
    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          hpe_pipeline.preprocess_metrics.get_latency(),
                          hpe_pipeline.inference_metrics.get_latency(),
                          hpe_pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)