def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)
    next_frame_id = 1
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    video_writer = cv2.VideoWriter()

    plugin_config = get_user_config(args.device, args.num_streams,
                                    args.num_threads)
    model_adapter = OpenvinoAdapter(
        create_core(),
        args.model,
        device=args.device,
        plugin_config=plugin_config,
        max_num_requests=args.num_infer_requests,
        model_parameters={'input_layouts': args.layout})

    start_time = perf_counter()
    frame = cap.read()
    if frame is None:
        raise RuntimeError("Can't read an image from the input")

    config = {
        'target_size': args.tsize,
        'aspect_ratio': frame.shape[1] / frame.shape[0],
        'confidence_threshold': args.prob_threshold,
        # 'padding_mode' and 'delta' apply only to the 'higherhrnet' and 'ae' architectures
        'padding_mode': 'center' if args.architecture_type == 'higherhrnet' else None,
        'delta': 0.5 if args.architecture_type == 'higherhrnet' else None,
    }
    model = ImageModel.create_model(ARCHITECTURES[args.architecture_type],
                                    model_adapter, config)
    model.log_layers_info()

    hpe_pipeline = AsyncPipeline(model)
    hpe_pipeline.submit_data(frame, 0, {
        'frame': frame,
        'start_time': start_time
    })

    output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
    if args.output_resolution:
        output_resolution = output_transform.new_resolution
    else:
        output_resolution = (frame.shape[1], frame.shape[0])
    presenter = monitors.Presenter(
        args.utilization_monitors, 55,
        (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
    if args.output and not video_writer.open(args.output,
                                             cv2.VideoWriter_fourcc(*'MJPG'),
                                             cap.fps(), output_resolution):
        raise RuntimeError("Can't open video writer")

    while True:
        if hpe_pipeline.callback_exceptions:
            raise hpe_pipeline.callback_exceptions[0]
        # Process all completed requests
        results = hpe_pipeline.get_result(next_frame_id_to_show)
        if results:
            (poses, scores), frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if len(poses) and args.raw_output_message:
                print_raw_results(poses, scores, next_frame_id_to_show)

            presenter.drawGraphs(frame)
            rendering_start_time = perf_counter()
            frame = draw_poses(frame, poses, args.prob_threshold,
                               output_transform)
            render_metrics.update(rendering_start_time)
            metrics.update(start_time, frame)
            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1
            if not args.no_show:
                cv2.imshow('Pose estimation results', frame)
                key = cv2.waitKey(1)

                ESC_KEY = 27
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            continue

        if hpe_pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                break

            # Submit for inference
            hpe_pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1

        else:
            # Wait for empty request
            hpe_pipeline.await_any()

    hpe_pipeline.await_all()
    if hpe_pipeline.callback_exceptions:
        raise hpe_pipeline.callback_exceptions[0]
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = hpe_pipeline.get_result(next_frame_id_to_show)
        (poses, scores), frame_meta = results
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        if len(poses) and args.raw_output_message:
            print_raw_results(poses, scores, next_frame_id_to_show)

        presenter.drawGraphs(frame)
        rendering_start_time = perf_counter()
        frame = draw_poses(frame, poses, args.prob_threshold, output_transform)
        render_metrics.update(rendering_start_time)
        metrics.update(start_time, frame)
        if video_writer.isOpened() and (
                args.output_limit <= 0
                or next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(frame)
        if not args.no_show:
            cv2.imshow('Pose estimation results', frame)
            key = cv2.waitKey(1)

            ESC_KEY = 27
            # Quit.
            if key in {ord('q'), ord('Q'), ESC_KEY}:
                break
            presenter.handleKey(key)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          hpe_pipeline.preprocess_metrics.get_latency(),
                          hpe_pipeline.inference_metrics.get_latency(),
                          hpe_pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
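
# The pose estimation demo above (Example 1) and the six examples below all
# instantiate the same producer/consumer skeleton: frames are submitted to the
# AsyncPipeline while it has free infer requests, completed results are
# drained in frame-id order, and a final pass flushes the requests still in
# flight. A minimal sketch of the pattern (the helper name and callback are
# illustrative, not part of the demos; it assumes the AsyncPipeline API used
# throughout this file):
from time import perf_counter

def run_async_loop(pipeline, cap, process_result):
    next_frame_id = 0
    next_frame_id_to_show = 0
    while True:
        if pipeline.is_ready():
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                break  # input exhausted
            pipeline.submit_data(frame, next_frame_id,
                                 {'frame': frame, 'start_time': start_time})
            next_frame_id += 1
        else:
            pipeline.await_any()  # block until some request completes
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            process_result(*results)  # (model outputs, frame metadata)
            next_frame_id_to_show += 1
    pipeline.await_all()  # wait for the in-flight requests
    for frame_id in range(next_frame_id_to_show, next_frame_id):
        process_result(*pipeline.get_result(frame_id))
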
# Example 2: image deblurring
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)
    next_frame_id = 1
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    video_writer = cv2.VideoWriter()

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams,
                                        args.num_threads)
        model_adapter = OpenvinoAdapter(
            create_core(),
            args.model,
            device=args.device,
            plugin_config=plugin_config,
            max_num_requests=args.num_infer_requests)
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    start_time = perf_counter()
    frame = cap.read()
    if frame is None:
        raise RuntimeError("Can't read an image from the input")

    model = Deblurring(model_adapter, preload=False)
    model.reshape(frame.shape)
    model.log_layers_info()

    pipeline = AsyncPipeline(model)

    pipeline.submit_data(frame, 0, {'frame': frame, 'start_time': start_time})

    presenter = monitors.Presenter(
        args.utilization_monitors, 55,
        (round(frame.shape[1] / 4), round(frame.shape[0] / 8)))
    if args.output and not video_writer.open(
            args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(),
        (2 * frame.shape[1], frame.shape[0])):
        raise RuntimeError("Can't open video writer")

    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                break

            # Submit for inference
            pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            result_frame, frame_meta = results
            input_frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            rendering_start_time = perf_counter()
            if input_frame.shape != result_frame.shape:
                input_frame = cv2.resize(
                    input_frame,
                    (result_frame.shape[1], result_frame.shape[0]))
            final_image = cv2.hconcat([input_frame, result_frame])
            render_metrics.update(rendering_start_time)

            presenter.drawGraphs(final_image)
            metrics.update(start_time, final_image)

            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(final_image)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Deblurring Results', final_image)
                key = cv2.waitKey(1)
                if key in {27, ord('q'), ord('Q')}:  # Esc / Q to quit
                    break
                presenter.handleKey(key)

    pipeline.await_all()
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        # The result may not be ready yet; poll until this frame's request completes
        while results is None:
            results = pipeline.get_result(next_frame_id_to_show)
        result_frame, frame_meta = results
        input_frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        rendering_start_time = perf_counter()
        if input_frame.shape != result_frame.shape:
            input_frame = cv2.resize(
                input_frame, (result_frame.shape[1], result_frame.shape[0]))
        final_image = cv2.hconcat([input_frame, result_frame])
        render_metrics.update(rendering_start_time)

        presenter.drawGraphs(final_image)
        metrics.update(start_time, final_image)

        if video_writer.isOpened() and (
                args.output_limit <= 0
                or next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(final_image)

        if not args.no_show:
            cv2.imshow('Deblurring Results', final_image)
            key = cv2.waitKey(1)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          pipeline.preprocess_metrics.get_latency(),
                          pipeline.inference_metrics.get_latency(),
                          pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
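
# Every example calls a build_argparser() that is not shown here. This is a
# hedged sketch of the flags the code above actually reads (flag names are
# inferred from the args.* accesses; short options, defaults and types are
# illustrative assumptions, not the demos' real parser):
from argparse import ArgumentParser

def build_argparser_sketch():
    parser = ArgumentParser()
    parser.add_argument('-m', '--model', required=True)
    parser.add_argument('-i', '--input', required=True)
    parser.add_argument('-d', '--device', default='CPU')
    parser.add_argument('--adapter', choices=('openvino', 'ovms'), default='openvino')
    parser.add_argument('--loop', action='store_true')
    parser.add_argument('-o', '--output')
    parser.add_argument('--output_limit', type=int, default=1000)
    parser.add_argument('--no_show', action='store_true')
    parser.add_argument('-u', '--utilization_monitors', default='')
    parser.add_argument('--num_streams', default='')
    parser.add_argument('--num_threads', type=int)
    parser.add_argument('--num_infer_requests', type=int, default=0)
    return parser


# Example 3: semantic segmentation / salient object detection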
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams,
                                        args.num_threads)
        model_adapter = OpenvinoAdapter(
            create_core(),
            args.model,
            device=args.device,
            plugin_config=plugin_config,
            max_num_requests=args.num_infer_requests,
            model_parameters={'input_layouts': args.layout})
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    model = SegmentationModel.create_model(args.architecture_type,
                                           model_adapter,
                                           {'path_to_labels': args.labels})
    if args.architecture_type == 'segmentation':
        visualizer = SegmentationVisualizer(args.colors)
    elif args.architecture_type == 'salient_object_detection':
        visualizer = SaliencyMapVisualizer()
    model.log_layers_info()

    pipeline = AsyncPipeline(model)

    next_frame_id = 0
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()
    only_masks = args.only_masks
    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2],
                                                   args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(
                    args.utilization_monitors, 55,
                    (round(output_resolution[0] / 4),
                     round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(
                        args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                        cap.fps(), output_resolution):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            if args.raw_output_message:
                print_raw_results(objects, next_frame_id_to_show, model.labels)
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']
            rendering_start_time = perf_counter()
            frame = render_segmentation(frame, objects, visualizer,
                                        output_transform, only_masks)
            render_metrics.update(rendering_start_time)
            presenter.drawGraphs(frame)
            metrics.update(start_time, frame)

            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Segmentation Results', frame)
                key = cv2.waitKey(1)
                if key in {27, ord('q'), ord('Q')}:  # Esc / Q to quit
                    break
                if key == 9:  # Tab toggles masks-only rendering
                    only_masks = not only_masks
                presenter.handleKey(key)

    pipeline.await_all()
    if pipeline.callback_exceptions:
        raise pipeline.callback_exceptions[0]
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        objects, frame_meta = results
        if args.raw_output_message:
            print_raw_results(objects, next_frame_id_to_show, model.labels)
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        rendering_start_time = perf_counter()
        frame = render_segmentation(frame, objects, visualizer,
                                    output_transform, only_masks)
        render_metrics.update(rendering_start_time)
        presenter.drawGraphs(frame)
        metrics.update(start_time, frame)

        if video_writer.isOpened() and (
                args.output_limit <= 0
                or next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(frame)

        if not args.no_show:
            cv2.imshow('Segmentation Results', frame)
            key = cv2.waitKey(1)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          pipeline.preprocess_metrics.get_latency(),
                          pipeline.inference_metrics.get_latency(),
                          pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
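
# Conceptually, the SegmentationVisualizer used above maps each per-pixel
# class id through a color palette and blends the result with the source
# frame. An illustrative sketch (not the OMZ implementation); 'palette' is
# assumed to be a uint8 NumPy array of shape (num_classes, 3):
import cv2
import numpy as np

def overlay_class_map(frame, class_map, palette, alpha=0.5):
    colors = palette[class_map]  # fancy indexing: (H, W) ids -> (H, W, 3) colors
    return cv2.addWeighted(frame, 1 - alpha, colors.astype(frame.dtype), alpha, 0)


# Example 4: background subtraction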
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)

    target_bgr = open_images_capture(args.target_bgr, loop=True) if args.target_bgr else None

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
        model_adapter = OpenvinoAdapter(create_core(), args.model, device=args.device, plugin_config=plugin_config,
                                        max_num_requests=args.num_infer_requests)
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    labels = ['__background__', 'person'] if args.labels is None else load_labels(args.labels)
    assert len(labels), 'The file with class labels is empty'

    configuration = {
        'confidence_threshold': args.prob_threshold,
        'resize_type': args.resize_type
    }

    model, need_bgr_input = get_model(model_adapter, configuration, args)

    input_bgr = open_images_capture(args.background, False).read() if need_bgr_input else None

    person_id = -1
    for i, label in enumerate(labels):
        if label == 'person':
            person_id = i
            break
    assert person_id >= 0, 'The "person" class was not found in the labels list.'

    model.log_layers_info()

    pipeline = AsyncPipeline(model)

    next_frame_id = 0
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()
    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            bgr = target_bgr.read() if target_bgr is not None else None
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(args.utilization_monitors, 55,
                                               (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                                                         cap.fps(), tuple(output_resolution)):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            data = {'src': frame, 'bgr': input_bgr} if input_bgr is not None else frame
            pipeline.submit_data(data, next_frame_id, {'frame': frame, 'start_time': start_time})
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            if args.raw_output_message:
                print_raw_results(objects, next_frame_id_to_show)
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']
            rendering_start_time = perf_counter()
            frame = render_results(frame, objects, output_resolution, bgr, person_id,
                                   args.blur_bgr, args.show_with_original_frame)
            render_metrics.update(rendering_start_time)
            presenter.drawGraphs(frame)
            metrics.update(start_time, frame)

            if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
                video_writer.write(frame)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Background subtraction results', frame)
                key = cv2.waitKey(1)
                if key in {27, ord('q'), ord('Q')}:  # Esc / Q to quit
                    break
                presenter.handleKey(key)

    pipeline.await_all()
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        # The result may not be ready yet; poll until this frame's request completes
        while results is None:
            results = pipeline.get_result(next_frame_id_to_show)
        objects, frame_meta = results
        if args.raw_output_message:
            print_raw_results(objects, next_frame_id_to_show)
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        rendering_start_time = perf_counter()
        frame = render_results(frame, objects, output_resolution, bgr, person_id,
                               args.blur_bgr, args.show_with_original_frame)
        render_metrics.update(rendering_start_time)
        presenter.drawGraphs(frame)
        metrics.update(start_time, frame)

        if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
            video_writer.write(frame)

        if not args.no_show:
            cv2.imshow('Background subtraction results', frame)
            cv2.waitKey(1)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          pipeline.preprocess_metrics.get_latency(),
                          pipeline.inference_metrics.get_latency(),
                          pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
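
# The render_results() helper above composites the detected person pixels over
# a replacement background. A minimal sketch of that idea for a single binary
# person mask (illustrative; the real helper also handles output resizing,
# args.blur_bgr and the side-by-side view):
import cv2
import numpy as np

def composite_person(frame, person_mask, target_bgr=None, blur_kernel=21):
    if target_bgr is None:
        # No replacement background given: blur the original one instead
        background = cv2.GaussianBlur(frame, (blur_kernel, blur_kernel), 0)
    else:
        background = cv2.resize(target_bgr, (frame.shape[1], frame.shape[0]))
    mask3 = np.repeat(person_mask[:, :, None].astype(bool), 3, axis=2)
    return np.where(mask3, frame, background)


# Example 5: object detection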
def main():
    args = build_argparser().parse_args()
    if args.architecture_type != 'yolov4' and args.anchors:
        log.warning('The "--anchors" option works only with "-at yolov4" and will be ignored')
    if args.architecture_type != 'yolov4' and args.masks:
        log.warning('The "--masks" option works only with "-at yolov4" and will be ignored')
    if args.architecture_type not in ('nanodet', 'nanodet-plus') and args.num_classes:
        log.warning('The "--num_classes" option works only with "-at nanodet" and '
                    '"-at nanodet-plus" and will be ignored')

    cap = open_images_capture(args.input, args.loop)

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams,
                                        args.num_threads)
        model_adapter = OpenvinoAdapter(
            create_core(),
            args.model,
            device=args.device,
            plugin_config=plugin_config,
            max_num_requests=args.num_infer_requests,
            model_parameters={'input_layouts': args.layout})
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    configuration = {
        'resize_type': args.resize_type,
        'mean_values': args.mean_values,
        'scale_values': args.scale_values,
        'reverse_input_channels': args.reverse_input_channels,
        'path_to_labels': args.labels,
        'confidence_threshold': args.prob_threshold,
        'input_size': args.input_size,  # specific to CTPN
        'num_classes': args.num_classes,  # specific to NanoDet and NanoDet-Plus
    }
    model = DetectionModel.create_model(args.architecture_type, model_adapter,
                                        configuration)
    model.log_layers_info()

    detector_pipeline = AsyncPipeline(model)

    next_frame_id = 0
    next_frame_id_to_show = 0

    palette = ColorPalette(len(model.labels) if model.labels else 100)
    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()

    while True:
        if detector_pipeline.callback_exceptions:
            raise detector_pipeline.callback_exceptions[0]
        # Process all completed requests
        results = detector_pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if len(objects) and args.raw_output_message:
                print_raw_results(objects, model.labels, next_frame_id_to_show)

            presenter.drawGraphs(frame)
            rendering_start_time = perf_counter()
            frame = draw_detections(frame, objects, palette, model.labels,
                                    output_transform)
            render_metrics.update(rendering_start_time)
            metrics.update(start_time, frame)

            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Detection Results', frame)
                key = cv2.waitKey(1)

                ESC_KEY = 27
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            continue

        if detector_pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2],
                                                   args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(
                    args.utilization_monitors, 55,
                    (round(output_resolution[0] / 4),
                     round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(
                        args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                        cap.fps(), output_resolution):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            detector_pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1
        else:
            # Wait for empty request
            detector_pipeline.await_any()

    detector_pipeline.await_all()
    if detector_pipeline.callback_exceptions:
        raise detector_pipeline.callback_exceptions[0]
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = detector_pipeline.get_result(next_frame_id_to_show)
        objects, frame_meta = results
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        if len(objects) and args.raw_output_message:
            print_raw_results(objects, model.labels, next_frame_id_to_show)

        presenter.drawGraphs(frame)
        rendering_start_time = perf_counter()
        frame = draw_detections(frame, objects, palette, model.labels,
                                output_transform)
        render_metrics.update(rendering_start_time)
        metrics.update(start_time, frame)

        if video_writer.isOpened() and (
                args.output_limit <= 0
                or next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(frame)

        if not args.no_show:
            cv2.imshow('Detection Results', frame)
            key = cv2.waitKey(1)

            ESC_KEY = 27
            # Quit.
            if key in {ord('q'), ord('Q'), ESC_KEY}:
                break
            presenter.handleKey(key)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          detector_pipeline.preprocess_metrics.get_latency(),
                          detector_pipeline.inference_metrics.get_latency(),
                          detector_pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
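
# A hedged sketch of what draw_detections() above might do per detection: OMZ
# detection results expose xmin/ymin/xmax/ymax, id and score attributes, but
# the exact drawing and label formatting here are illustrative assumptions:
import cv2

def draw_boxes_sketch(frame, objects, palette, labels):
    for det in objects:
        color = palette[det.id]
        xmin, ymin, xmax, ymax = (int(v) for v in (det.xmin, det.ymin, det.xmax, det.ymax))
        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
        caption = '{} {:.2f}'.format(labels[det.id] if labels else det.id, det.score)
        cv2.putText(frame, caption, (xmin, ymin - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 1)
    return frame
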
# Example 6: classification
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)
    # waitKey delay: 1 ms for video/camera streams, 0 (block for a key) for still images
    delay = int(cap.get_type() in {'VIDEO', 'CAMERA'})

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams,
                                        args.num_threads)
        model_adapter = OpenvinoAdapter(
            create_core(),
            args.model,
            device=args.device,
            plugin_config=plugin_config,
            max_num_requests=args.num_infer_requests)
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    config = {
        'mean_values': args.mean_values,
        'scale_values': args.scale_values,
        'reverse_input_channels': args.reverse_input_channels,
        'topk': args.topk,
        'path_to_labels': args.labels
    }
    model = Classification(model_adapter, config)
    model.log_layers_info()

    async_pipeline = AsyncPipeline(model)

    next_frame_id = 0
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()
    ESC_KEY = 27
    key = -1
    while True:
        if async_pipeline.callback_exceptions:
            raise async_pipeline.callback_exceptions[0]
        # Process all completed requests
        results = async_pipeline.get_result(next_frame_id_to_show)
        if results:
            classifications, frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']
            if args.raw_output_message:
                print_raw_results(classifications, next_frame_id_to_show)

            presenter.drawGraphs(frame)
            rendering_start_time = perf_counter()
            frame = draw_labels(frame, classifications, output_transform)
            if delay or args.no_show:
                render_metrics.update(rendering_start_time)
                metrics.update(start_time, frame)

            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Classification Results', frame)
                key = cv2.waitKey(delay)
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            continue

        if async_pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2],
                                                   args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(
                    args.utilization_monitors, 55,
                    (round(output_resolution[0] / 4),
                     round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(
                        args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                        cap.fps(), output_resolution):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            async_pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1

        else:
            # Wait for empty request
            async_pipeline.await_any()

    async_pipeline.await_all()
    if key not in {ord('q'), ord('Q'), ESC_KEY}:
        # Process completed requests
        for next_frame_id_to_show in range(next_frame_id_to_show,
                                           next_frame_id):
            results = async_pipeline.get_result(next_frame_id_to_show)
            # The result may not be ready yet; poll until this frame's request completes
            while results is None:
                results = async_pipeline.get_result(next_frame_id_to_show)
            classifications, frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if args.raw_output_message:
                print_raw_results(classifications, next_frame_id_to_show)

            presenter.drawGraphs(frame)
            rendering_start_time = perf_counter()
            frame = draw_labels(frame, classifications, output_transform)
            if delay or args.no_show:
                render_metrics.update(rendering_start_time)
                metrics.update(start_time, frame)

            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)

            if not args.no_show:
                cv2.imshow('Classification Results', frame)
                key = cv2.waitKey(delay)

                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)

    if delay or args.no_show:
        metrics.log_total()
        log_latency_per_stage(cap.reader_metrics.get_latency(),
                              async_pipeline.preprocess_metrics.get_latency(),
                              async_pipeline.inference_metrics.get_latency(),
                              async_pipeline.postprocess_metrics.get_latency(),
                              render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
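
# The Classification wrapper's 'topk' option selects the k most probable
# classes. The selection itself amounts to the following (an illustrative
# sketch applying a softmax to raw logits):
import numpy as np

def topk_classes(logits, labels, k=5):
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    order = np.argsort(probs)[::-1][:k]
    return [(labels[i], float(probs[i])) for i in order]
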
# Example 7: instance segmentation
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
        model_adapter = OpenvinoAdapter(create_core(), args.model, device=args.device, plugin_config=plugin_config,
                                        max_num_requests=args.num_infer_requests,
                                        model_parameters={'input_layouts': args.layout})
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    configuration = {
        'confidence_threshold': args.prob_threshold,
        'path_to_labels': args.labels,
    }
    model = get_model(model_adapter, configuration)
    model.log_layers_info()

    pipeline = AsyncPipeline(model)

    next_frame_id = 0
    next_frame_id_to_show = 0

    tracker = None
    if not args.no_track and cap.get_type() in {'VIDEO', 'CAMERA'}:
        tracker = StaticIOUTracker()
    visualizer = InstanceSegmentationVisualizer(model.labels, args.show_boxes, args.show_scores)

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()

    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(args.utilization_monitors, 55,
                                               (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                                                         cap.fps(), tuple(output_resolution)):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            pipeline.submit_data(frame, next_frame_id, {'frame': frame, 'start_time': start_time})
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            (scores, classes, boxes, masks), frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if args.raw_output_message:
                print_raw_results(boxes, classes, scores, next_frame_id_to_show)

            rendering_start_time = perf_counter()
            masks_tracks_ids = tracker(masks, classes) if tracker else None
            frame = visualizer(frame, boxes, classes, scores, masks, masks_tracks_ids)
            render_metrics.update(rendering_start_time)

            presenter.drawGraphs(frame)
            metrics.update(start_time, frame)

            if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Instance Segmentation results', frame)
                key = cv2.waitKey(1)
                if key in {27, ord('q'), ord('Q')}:  # Esc / Q to quit
                    break
                presenter.handleKey(key)

    pipeline.await_all()
    if pipeline.callback_exceptions:
        raise pipeline.callback_exceptions[0]
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        (scores, classes, boxes, masks), frame_meta = results
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        if args.raw_output_message:
            print_raw_results(boxes, classes, scores, next_frame_id_to_show)

        rendering_start_time = perf_counter()
        masks_tracks_ids = tracker(masks, classes) if tracker else None
        frame = visualizer(frame, boxes, classes, scores, masks, masks_tracks_ids)
        render_metrics.update(rendering_start_time)

        presenter.drawGraphs(frame)
        metrics.update(start_time, frame)

        if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(frame)

        if not args.no_show:
            cv2.imshow('Instance Segmentation results', frame)
            cv2.waitKey(1)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          pipeline.preprocess_metrics.get_latency(),
                          pipeline.inference_metrics.get_latency(),
                          pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
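
# StaticIOUTracker (Example 7) keeps instance ids stable across frames by
# matching each new mask against the previous frame's masks via IoU. A greedy
# sketch of the idea (illustrative only, not the OMZ implementation; the real
# tracker presumably also takes class agreement into account):
import numpy as np

def mask_iou(a, b):
    union = np.logical_or(a, b).sum()
    return np.logical_and(a, b).sum() / union if union else 0.0

class GreedyIOUTrackerSketch:
    def __init__(self, iou_threshold=0.5):
        self.prev = []  # list of (mask, track_id) from the previous frame
        self.next_id = 0
        self.iou_threshold = iou_threshold

    def __call__(self, masks, classes):
        ids, used = [], set()
        for mask in masks:
            # Find the best unmatched previous mask above the IoU threshold
            best_j, best_iou = None, self.iou_threshold
            for j, (prev_mask, _) in enumerate(self.prev):
                iou = mask_iou(mask, prev_mask)
                if j not in used and iou > best_iou:
                    best_j, best_iou = j, iou
            if best_j is None:
                track_id, self.next_id = self.next_id, self.next_id + 1
            else:
                used.add(best_j)
                track_id = self.prev[best_j][1]
            ids.append(track_id)
        self.prev = list(zip(masks, ids))
        return ids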