Example No. 1
    def __init__(
        self,
        hparams: OTEClassificationParameters,
        label_schema: LabelSchemaEntity,
        model_file: Union[str, bytes],
        weight_file: Union[str, bytes, None] = None,
        device: str = "CPU",
        num_requests: int = 1,
    ):
        """
        Inferencer implementation for OTEDetection using OpenVINO backend.
        :param model: Path to model to load, `.xml`, `.bin` or `.onnx` file.
        :param hparams: Hyper parameters that the model should use.
        :param num_requests: Maximum number of requests that the inferencer can make.
            Good value is the number of available cores. Defaults to 1.
        :param device: Device to run inference on, such as CPU, GPU or MYRIAD. Defaults to "CPU".
        """

        multilabel = len(label_schema.get_groups(False)) > 1 and \
            len(label_schema.get_groups(False)) == len(label_schema.get_labels(include_empty=False))
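        # i.e. treat the task as multi-label when there is more than one label
        # group and each non-empty label has a group of its own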

        self.label_schema = label_schema

        model_adapter = OpenvinoAdapter(create_core(),
                                        model_file,
                                        weight_file,
                                        device=device,
                                        max_num_requests=num_requests)
        self.configuration = {'multilabel': multilabel}
        self.model = Model.create_model("ote_classification",
                                        model_adapter,
                                        self.configuration,
                                        preload=True)

        self.converter = ClassificationToAnnotationConverter(self.label_schema)
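A minimal usage sketch for the constructor above, assuming the enclosing class is named ClassificationInferencer (the class name is not shown in the snippet) and that hparams and label_schema objects already exist:

# hypothetical names: ClassificationInferencer, hparams, label_schema
inferencer = ClassificationInferencer(
    hparams=hparams,
    label_schema=label_schema,
    model_file="openvino.xml",   # path to the IR .xml (or .onnx) file
    weight_file="openvino.bin",  # optional .bin weights
    device="CPU",
    num_requests=4,              # e.g. one request per available core
)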
Example No. 2
def create_model(model_path: Path,
                 config_file: Optional[Path] = None) -> Model:
    """
    Create model using ModelAPI factory
    """

    model_adapter = OpenvinoAdapter(create_core(), get_model_path(model_path))
    parameters = get_parameters(config_file)
    try:
        importlib.import_module(".model", "demo_package")
    except ImportError:
        print("Using model wrapper from Open Model Zoo ModelAPI")
    # labels for ModelAPI wrappers can be empty because they are not used in pre- or post-processing
    parameters["model_parameters"]["labels"] = []
    model = Model.create_model(
        parameters["type_of_model"],
        model_adapter,
        parameters["model_parameters"],
        preload=True,
    )

    return model
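For reference, a hedged usage sketch of this factory helper; the paths below are placeholders, and the config file is assumed to provide the "type_of_model" and "model_parameters" entries that get_parameters() reads:

from pathlib import Path

# placeholder paths; create_model loads the IR via OpenvinoAdapter and
# instantiates the wrapper type named in the config file
model = create_model(Path("model.xml"), config_file=Path("config.json"))
model.log_layers_info()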
Example No. 3
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)
    next_frame_id = 1
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    video_writer = cv2.VideoWriter()

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams,
                                        args.num_threads)
        model_adapter = OpenvinoAdapter(
            create_core(),
            args.model,
            device=args.device,
            plugin_config=plugin_config,
            max_num_requests=args.num_infer_requests)
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    start_time = perf_counter()
    frame = cap.read()
    if frame is None:
        raise RuntimeError("Can't read an image from the input")

    model = Deblurring(model_adapter, preload=False)
    model.reshape(frame.shape)
    model.log_layers_info()

    pipeline = AsyncPipeline(model)

    pipeline.submit_data(frame, 0, {'frame': frame, 'start_time': start_time})

    presenter = monitors.Presenter(
        args.utilization_monitors, 55,
        (round(frame.shape[1] / 4), round(frame.shape[0] / 8)))
    if args.output and not video_writer.open(
            args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(),
            (2 * frame.shape[1], frame.shape[0])):
        raise RuntimeError("Can't open video writer")

    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                break

            # Submit for inference
            pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            result_frame, frame_meta = results
            input_frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            rendering_start_time = perf_counter()
            if input_frame.shape != result_frame.shape:
                input_frame = cv2.resize(
                    input_frame,
                    (result_frame.shape[1], result_frame.shape[0]))
            final_image = cv2.hconcat([input_frame, result_frame])
            render_metrics.update(rendering_start_time)

            presenter.drawGraphs(final_image)
            metrics.update(start_time, final_image)

            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(final_image)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Deblurring Results', final_image)
                key = cv2.waitKey(1)
                if key in {27, ord('q'), ord('Q')}:
                    break
                presenter.handleKey(key)

    pipeline.await_all()
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        while results is None:
            results = pipeline.get_result(next_frame_id_to_show)
        result_frame, frame_meta = results
        input_frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        rendering_start_time = perf_counter()
        if input_frame.shape != result_frame.shape:
            input_frame = cv2.resize(
                input_frame, (result_frame.shape[1], result_frame.shape[0]))
        final_image = cv2.hconcat([input_frame, result_frame])
        render_metrics.update(rendering_start_time)

        presenter.drawGraphs(final_image)
        metrics.update(start_time, final_image)

        if video_writer.isOpened() and (
                args.output_limit <= 0
                or next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(final_image)

        if not args.no_show:
            cv2.imshow('Deblurring Results', final_image)
            key = cv2.waitKey(1)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          pipeline.preprocess_metrics.get_latency(),
                          pipeline.inference_metrics.get_latency(),
                          pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
Example No. 4
def main():
    args = build_argparser().parse_args()

    paragraphs = get_paragraphs(args.input)

    preprocessing_start_time = perf_counter()
    vocab = load_vocab_file(args.vocab)
    log.debug("Loaded vocab file from {}, get {} tokens".format(args.vocab, len(vocab)))

    # get context as a string (as we might need its length for the sequence reshape)
    context = '\n'.join(paragraphs)
    sentences = re.split(sentence_splitter, context)
    preprocessed_sentences = [text_to_tokens(sentence, vocab) for sentence in sentences]
    max_sentence_length = max([len(tokens) + 2 for tokens, _ in preprocessed_sentences])
    preprocessing_total_time = (perf_counter() - preprocessing_start_time) * 1e3
    source = tuple(zip(sentences, preprocessed_sentences))

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
        model_adapter = OpenvinoAdapter(create_core(), args.model, device=args.device, plugin_config=plugin_config,
                                        max_num_requests=args.num_infer_requests, model_parameters={'input_layouts': args.layout})
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    enable_padding = not args.dynamic_shape
    model = BertNamedEntityRecognition(model_adapter, {'vocab': vocab, 'input_names': args.input_names, 'enable_padding': enable_padding})
    if max_sentence_length > model.max_length:
        model.reshape(max_sentence_length if enable_padding else (1, max_sentence_length))
    model.log_layers_info()

    pipeline = AsyncPipeline(model)

    next_sentence_id = 0
    next_sentence_id_to_show = 0
    start_time = perf_counter()

    while True:
        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        results = pipeline.get_result(next_sentence_id_to_show)
        if results:
            (score, filtered_labels_id), meta = results
            next_sentence_id_to_show += 1
            print_raw_results(score, filtered_labels_id, meta)
            continue

        if pipeline.is_ready():
            if next_sentence_id == len(source):
                break
            sentence, (c_tokens_id, c_token_s_e) = source[next_sentence_id]
            pipeline.submit_data(c_tokens_id, next_sentence_id, {'sentence': sentence, 'c_token_s_e': c_token_s_e})
            next_sentence_id += 1
        else:
            pipeline.await_any()

    pipeline.await_all()
    if pipeline.callback_exceptions:
        raise pipeline.callback_exceptions[0]
    for sentence_id in range(next_sentence_id_to_show, next_sentence_id):
        results = pipeline.get_result(sentence_id)
        (score, filtered_labels_id), meta = results
        print_raw_results(score, filtered_labels_id, meta)

    total_latency = (perf_counter() - start_time) * 1e3 + preprocessing_total_time
    log.info("Metrics report:")
    log.info("\tLatency: {:.1f} ms".format(total_latency))
Example No. 5
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)
    next_frame_id = 1
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    video_writer = cv2.VideoWriter()

    plugin_config = get_user_config(args.device, args.num_streams,
                                    args.num_threads)
    model_adapter = OpenvinoAdapter(
        create_core(),
        args.model,
        device=args.device,
        plugin_config=plugin_config,
        max_num_requests=args.num_infer_requests,
        model_parameters={'input_layouts': args.layout})

    start_time = perf_counter()
    frame = cap.read()
    if frame is None:
        raise RuntimeError("Can't read an image from the input")

    config = {
        'target_size': args.tsize,
        'aspect_ratio': frame.shape[1] / frame.shape[0],
        'confidence_threshold': args.prob_threshold,
        'padding_mode': 'center' if args.architecture_type == 'higherhrnet' else None,  # 'higherhrnet' and 'ae' specific
        'delta': 0.5 if args.architecture_type == 'higherhrnet' else None,  # 'higherhrnet' and 'ae' specific
    }
    model = ImageModel.create_model(ARCHITECTURES[args.architecture_type],
                                    model_adapter, config)
    model.log_layers_info()

    hpe_pipeline = AsyncPipeline(model)
    hpe_pipeline.submit_data(frame, 0, {
        'frame': frame,
        'start_time': start_time
    })

    output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
    if args.output_resolution:
        output_resolution = output_transform.new_resolution
    else:
        output_resolution = (frame.shape[1], frame.shape[0])
    presenter = monitors.Presenter(
        args.utilization_monitors, 55,
        (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
    if args.output and not video_writer.open(args.output,
                                             cv2.VideoWriter_fourcc(*'MJPG'),
                                             cap.fps(), output_resolution):
        raise RuntimeError("Can't open video writer")

    while True:
        if hpe_pipeline.callback_exceptions:
            raise hpe_pipeline.callback_exceptions[0]
        # Process all completed requests
        results = hpe_pipeline.get_result(next_frame_id_to_show)
        if results:
            (poses, scores), frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if len(poses) and args.raw_output_message:
                print_raw_results(poses, scores, next_frame_id_to_show)

            presenter.drawGraphs(frame)
            rendering_start_time = perf_counter()
            frame = draw_poses(frame, poses, args.prob_threshold,
                               output_transform)
            render_metrics.update(rendering_start_time)
            metrics.update(start_time, frame)
            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1
            if not args.no_show:
                cv2.imshow('Pose estimation results', frame)
                key = cv2.waitKey(1)

                ESC_KEY = 27
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            continue

        if hpe_pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                break

            # Submit for inference
            hpe_pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1

        else:
            # Wait for empty request
            hpe_pipeline.await_any()

    hpe_pipeline.await_all()
    if hpe_pipeline.callback_exceptions:
        raise hpe_pipeline.callback_exceptions[0]
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = hpe_pipeline.get_result(next_frame_id_to_show)
        (poses, scores), frame_meta = results
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        if len(poses) and args.raw_output_message:
            print_raw_results(poses, scores, next_frame_id_to_show)

        presenter.drawGraphs(frame)
        rendering_start_time = perf_counter()
        frame = draw_poses(frame, poses, args.prob_threshold, output_transform)
        render_metrics.update(rendering_start_time)
        metrics.update(start_time, frame)
        if video_writer.isOpened() and (
                args.output_limit <= 0
                or next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(frame)
        if not args.no_show:
            cv2.imshow('Pose estimation results', frame)
            key = cv2.waitKey(1)

            ESC_KEY = 27
            # Quit.
            if key in {ord('q'), ord('Q'), ESC_KEY}:
                break
            presenter.handleKey(key)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          hpe_pipeline.preprocess_metrics.get_latency(),
                          hpe_pipeline.inference_metrics.get_latency(),
                          hpe_pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
Example No. 6
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams,
                                        args.num_threads)
        model_adapter = OpenvinoAdapter(
            create_core(),
            args.model,
            device=args.device,
            plugin_config=plugin_config,
            max_num_requests=args.num_infer_requests,
            model_parameters={'input_layouts': args.layout})
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    model = SegmentationModel.create_model(args.architecture_type,
                                           model_adapter,
                                           {'path_to_labels': args.labels})
    if args.architecture_type == 'segmentation':
        visualizer = SegmentationVisualizer(args.colors)
    if args.architecture_type == 'salient_object_detection':
        visualizer = SaliencyMapVisualizer()
    model.log_layers_info()

    pipeline = AsyncPipeline(model)

    next_frame_id = 0
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()
    only_masks = args.only_masks
    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2],
                                                   args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(
                    args.utilization_monitors, 55,
                    (round(output_resolution[0] / 4),
                     round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(
                        args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                        cap.fps(), output_resolution):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            if args.raw_output_message:
                print_raw_results(objects, next_frame_id_to_show, model.labels)
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']
            rendering_start_time = perf_counter()
            frame = render_segmentation(frame, objects, visualizer,
                                        output_transform, only_masks)
            render_metrics.update(rendering_start_time)
            presenter.drawGraphs(frame)
            metrics.update(start_time, frame)

            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Segmentation Results', frame)
                key = cv2.waitKey(1)
                if key in {27, ord('q'), ord('Q')}:
                    break
                if key == 9:
                    only_masks = not only_masks
                presenter.handleKey(key)

    pipeline.await_all()
    if pipeline.callback_exceptions:
        raise pipeline.callback_exceptions[0]
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        objects, frame_meta = results
        if args.raw_output_message:
            print_raw_results(objects, next_frame_id_to_show, model.labels)
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        rendering_start_time = perf_counter()
        frame = render_segmentation(frame, objects, visualizer,
                                    output_transform, only_masks)
        render_metrics.update(rendering_start_time)
        presenter.drawGraphs(frame)
        metrics.update(start_time, frame)

        if video_writer.isOpened() and (
                args.output_limit <= 0
                or next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(frame)

        if not args.no_show:
            cv2.imshow('Segmentation Results', frame)
            key = cv2.waitKey(1)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          pipeline.preprocess_metrics.get_latency(),
                          pipeline.inference_metrics.get_latency(),
                          pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
Example No. 7
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)

    target_bgr = open_images_capture(args.target_bgr, loop=True) if args.target_bgr else None

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
        model_adapter = OpenvinoAdapter(create_core(), args.model, device=args.device, plugin_config=plugin_config,
                                        max_num_requests=args.num_infer_requests)
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    labels = ['__background__', 'person'] if args.labels is None else load_labels(args.labels)
    assert len(labels), 'The file with class labels is empty'

    configuration = {
        'confidence_threshold': args.prob_threshold,
        'resize_type': args.resize_type
    }

    model, need_bgr_input = get_model(model_adapter, configuration, args)

    input_bgr = open_images_capture(args.background, False).read() if need_bgr_input else None

    person_id = -1
    for i, label in enumerate(labels):
        if label == 'person':
            person_id = i
            break
    assert person_id >= 0, "The 'person' class was not found in the labels list."

    model.log_layers_info()

    pipeline = AsyncPipeline(model)

    next_frame_id = 0
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()
    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            bgr = target_bgr.read() if target_bgr is not None else None
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(args.utilization_monitors, 55,
                                               (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                                                         cap.fps(), tuple(output_resolution)):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            data = {'src': frame, 'bgr': input_bgr} if input_bgr is not None else frame
            pipeline.submit_data(data, next_frame_id, {'frame': frame, 'start_time': start_time})
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            if args.raw_output_message:
                print_raw_results(objects, next_frame_id_to_show)
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']
            rendering_start_time = perf_counter()
            frame = render_results(frame, objects, output_resolution, bgr, person_id,
                                   args.blur_bgr, args.show_with_original_frame)
            render_metrics.update(rendering_start_time)
            presenter.drawGraphs(frame)
            metrics.update(start_time, frame)

            if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
                video_writer.write(frame)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Background subtraction results', frame)
                key = cv2.waitKey(1)
                if key in {27, ord('q'), ord('Q')}:
                    break
                presenter.handleKey(key)

    pipeline.await_all()
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        while results is None:
            results = pipeline.get_result(next_frame_id_to_show)
        objects, frame_meta = results
        if args.raw_output_message:
            print_raw_results(objects, next_frame_id_to_show, model.labels)
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        rendering_start_time = perf_counter()
        frame = render_results(frame, objects, output_resolution, bgr, person_id,
                               args.blur_bgr, args.show_with_original_frame)
        render_metrics.update(rendering_start_time)
        presenter.drawGraphs(frame)
        metrics.update(start_time, frame)

        if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
            video_writer.write(frame)

        if not args.no_show:
            cv2.imshow('Background subtraction results', frame)
            cv2.waitKey(1)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          pipeline.preprocess_metrics.get_latency(),
                          pipeline.inference_metrics.get_latency(),
                          pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
Example No. 8
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
        model_adapter = OpenvinoAdapter(create_core(), args.model, device=args.device, plugin_config=plugin_config,
                                        max_num_requests=args.num_infer_requests)
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    model = MonoDepthModel(model_adapter)
    model.log_layers_info()

    pipeline = AsyncPipeline(model)

    next_frame_id = 0
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()

    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(args.utilization_monitors, 55,
                                               (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                                                         cap.fps(), output_resolution):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            pipeline.submit_data(frame, next_frame_id, {'start_time': start_time})
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            depth_map, frame_meta = results
            depth_map = apply_color_map(depth_map, output_transform)

            start_time = frame_meta['start_time']
            presenter.drawGraphs(depth_map)
            metrics.update(start_time, depth_map)

            if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
                video_writer.write(depth_map)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow(DEMO_NAME, depth_map)
                key = cv2.waitKey(1)
                if key in {27, ord('q'), ord('Q')}:
                    break
                presenter.handleKey(key)

    pipeline.await_all()
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        while results is None:
            results = pipeline.get_result(next_frame_id_to_show)
        depth_map, frame_meta = results
        depth_map = apply_color_map(depth_map, output_transform)

        start_time = frame_meta['start_time']

        presenter.drawGraphs(depth_map)
        metrics.update(start_time, depth_map)

        if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
            video_writer.write(depth_map)

        if not args.no_show:
            cv2.imshow(DEMO_NAME, depth_map)
            key = cv2.waitKey(1)
            if key in {27, ord('q'), ord('Q')}:
                break
            presenter.handleKey(key)

    metrics.log_total()
    for rep in presenter.reportMeans():
        log.info(rep)
Example No. 9
def main():
    args = build_argparser().parse_args()
    if args.architecture_type != 'yolov4' and args.anchors:
        log.warning(
            'The "--anchors" option works only for "-at==yolov4". The option will be ignored'
        )
    if args.architecture_type != 'yolov4' and args.masks:
        log.warning(
            'The "--masks" option works only for "-at==yolov4". The option will be ignored'
        )
    if args.architecture_type not in ('nanodet', 'nanodet-plus') and args.num_classes:
        log.warning(
            'The "--num_classes" option works only for "-at==nanodet" and "-at==nanodet-plus". The option will be ignored'
        )

    cap = open_images_capture(args.input, args.loop)

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams,
                                        args.num_threads)
        model_adapter = OpenvinoAdapter(
            create_core(),
            args.model,
            device=args.device,
            plugin_config=plugin_config,
            max_num_requests=args.num_infer_requests,
            model_parameters={'input_layouts': args.layout})
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    configuration = {
        'resize_type': args.resize_type,
        'mean_values': args.mean_values,
        'scale_values': args.scale_values,
        'reverse_input_channels': args.reverse_input_channels,
        'path_to_labels': args.labels,
        'confidence_threshold': args.prob_threshold,
        'input_size': args.input_size,  # CTPN-specific
        'num_classes': args.num_classes,  # NanoDet and NanoDetPlus specific
    }
    model = DetectionModel.create_model(args.architecture_type, model_adapter,
                                        configuration)
    model.log_layers_info()

    detector_pipeline = AsyncPipeline(model)

    next_frame_id = 0
    next_frame_id_to_show = 0

    palette = ColorPalette(len(model.labels) if model.labels else 100)
    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()

    while True:
        if detector_pipeline.callback_exceptions:
            raise detector_pipeline.callback_exceptions[0]
        # Process all completed requests
        results = detector_pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if len(objects) and args.raw_output_message:
                print_raw_results(objects, model.labels, next_frame_id_to_show)

            presenter.drawGraphs(frame)
            rendering_start_time = perf_counter()
            frame = draw_detections(frame, objects, palette, model.labels,
                                    output_transform)
            render_metrics.update(rendering_start_time)
            metrics.update(start_time, frame)

            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Detection Results', frame)
                key = cv2.waitKey(1)

                ESC_KEY = 27
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            continue

        if detector_pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2],
                                                   args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(
                    args.utilization_monitors, 55,
                    (round(output_resolution[0] / 4),
                     round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(
                        args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                        cap.fps(), output_resolution):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            detector_pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1
        else:
            # Wait for empty request
            detector_pipeline.await_any()

    detector_pipeline.await_all()
    if detector_pipeline.callback_exceptions:
        raise detector_pipeline.callback_exceptions[0]
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = detector_pipeline.get_result(next_frame_id_to_show)
        objects, frame_meta = results
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        if len(objects) and args.raw_output_message:
            print_raw_results(objects, model.labels, next_frame_id_to_show)

        presenter.drawGraphs(frame)
        rendering_start_time = perf_counter()
        frame = draw_detections(frame, objects, palette, model.labels,
                                output_transform)
        render_metrics.update(rendering_start_time)
        metrics.update(start_time, frame)

        if video_writer.isOpened() and (
                args.output_limit <= 0
                or next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(frame)

        if not args.no_show:
            cv2.imshow('Detection Results', frame)
            key = cv2.waitKey(1)

            ESC_KEY = 27
            # Quit.
            if key in {ord('q'), ord('Q'), ESC_KEY}:
                break
            presenter.handleKey(key)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          detector_pipeline.preprocess_metrics.get_latency(),
                          detector_pipeline.inference_metrics.get_latency(),
                          detector_pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
Example No. 10
def main():
    args = build_argparser().parse_args()

    paragraphs = get_paragraphs(args.input)

    preprocessing_start_time = perf_counter()
    vocab = load_vocab_file(args.vocab)
    log.debug("Loaded vocab file from {}, get {} tokens".format(
        args.vocab, len(vocab)))

    # get context as a string (as we might need its length for the sequence reshape)
    context = '\n'.join(paragraphs)
    visualizer = Visualizer(context, args.colors)
    # encode context into token ids list
    c_tokens = text_to_tokens(context.lower(), vocab)
    total_latency = (perf_counter() - preprocessing_start_time) * 1e3

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams,
                                        args.num_threads)
        model_adapter = OpenvinoAdapter(
            create_core(),
            args.model,
            device=args.device,
            plugin_config=plugin_config,
            max_num_requests=args.num_infer_requests)
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    config = {
        'vocab': vocab,
        'input_names': args.input_names,
        'output_names': args.output_names,
        'max_answer_token_num': args.max_answer_token_num,
        'squad_ver': args.model_squad_ver
    }
    model = BertQuestionAnswering(model_adapter, config)
    if args.reshape:
        # find the closest multiple of 64; if it is smaller than the current network sequence length, reshape
        new_length = min(
            model.max_length,
            int(
                np.ceil(
                    (len(c_tokens[0]) + args.max_question_token_num) / 64) *
                64))
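        # e.g. with a 300-token context and max_question_token_num == 32,
        # ceil((300 + 32) / 64) * 64 == 384, which is used only if it is
        # smaller than the model's current max sequence length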
        if new_length < model.max_length:
            try:
                model.reshape(new_length)
            except RuntimeError:
                log.error(
                    "Failed to reshape the network, please retry the demo without '-r' option"
                )
                sys.exit(-1)
        else:
            log.debug(
                "\tSkipping network reshaping,"
                " as (context length + max question length) exceeds the current (input) network sequence length"
            )
    model.log_layers_info()

    pipeline = AsyncPipeline(model)

    if args.questions:

        def questions():
            for question in args.questions:
                log.info("\n\tQuestion: {}".format(question))
                yield question
    else:

        def questions():
            while True:
                yield input('\n\tType a question (empty string to exit): ')

    for question in questions():
        if not question.strip():
            break

        answers = []
        next_window_id = 0
        next_window_id_to_show = 0
        start_time = perf_counter()
        q_tokens_id, _ = text_to_tokens(question.lower(), vocab)
        source = ContextSource(q_tokens_id, c_tokens, model.max_length)

        while True:
            if pipeline.callback_exceptions:
                raise pipeline.callback_exceptions[0]
            results = pipeline.get_result(next_window_id_to_show)
            if results:
                next_window_id_to_show += 1
                update_answers_list(answers, results[0])
                continue

            if pipeline.is_ready():
                if source.is_over():
                    break
                pipeline.submit_data(source.get_data(), next_window_id, None)
                next_window_id += 1
            else:
                pipeline.await_any()

        pipeline.await_all()
        for window_id in range(next_window_id_to_show, next_window_id):
            results = pipeline.get_result(window_id)
            while results is None:
                results = pipeline.get_result(window_id)
            update_answers_list(answers, results[0])

        visualizer.show_answers(answers)
        total_latency += (perf_counter() - start_time) * 1e3

    log.info("Metrics report:")
    log.info("\tLatency: {:.1f} ms".format(total_latency))
Example No. 11
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)
    delay = int(cap.get_type() in {'VIDEO', 'CAMERA'})

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams,
                                        args.num_threads)
        model_adapter = OpenvinoAdapter(
            create_core(),
            args.model,
            device=args.device,
            plugin_config=plugin_config,
            max_num_requests=args.num_infer_requests)
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    config = {
        'mean_values': args.mean_values,
        'scale_values': args.scale_values,
        'reverse_input_channels': args.reverse_input_channels,
        'topk': args.topk,
        'path_to_labels': args.labels
    }
    model = Classification(model_adapter, config)
    model.log_layers_info()

    async_pipeline = AsyncPipeline(model)

    next_frame_id = 0
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()
    ESC_KEY = 27
    key = -1
    while True:
        if async_pipeline.callback_exceptions:
            raise async_pipeline.callback_exceptions[0]
        # Process all completed requests
        results = async_pipeline.get_result(next_frame_id_to_show)
        if results:
            classifications, frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']
            if args.raw_output_message:
                print_raw_results(classifications, next_frame_id_to_show)

            presenter.drawGraphs(frame)
            rendering_start_time = perf_counter()
            frame = draw_labels(frame, classifications, output_transform)
            if delay or args.no_show:
                render_metrics.update(rendering_start_time)
                metrics.update(start_time, frame)

            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Classification Results', frame)
                key = cv2.waitKey(delay)
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            continue

        if async_pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2],
                                                   args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(
                    args.utilization_monitors, 55,
                    (round(output_resolution[0] / 4),
                     round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(
                        args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                        cap.fps(), output_resolution):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            async_pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1

        else:
            # Wait for empty request
            async_pipeline.await_any()

    async_pipeline.await_all()
    if key not in {ord('q'), ord('Q'), ESC_KEY}:
        # Process completed requests
        for next_frame_id_to_show in range(next_frame_id_to_show,
                                           next_frame_id):
            results = async_pipeline.get_result(next_frame_id_to_show)
            while results is None:
                results = async_pipeline.get_result(next_frame_id_to_show)
            classifications, frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if args.raw_output_message:
                print_raw_results(classifications, next_frame_id_to_show)

            presenter.drawGraphs(frame)
            rendering_start_time = perf_counter()
            frame = draw_labels(frame, classifications, output_transform)
            if delay or args.no_show:
                render_metrics.update(rendering_start_time)
                metrics.update(start_time, frame)

            if video_writer.isOpened() and (
                    args.output_limit <= 0
                    or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)

            if not args.no_show:
                cv2.imshow('Classification Results', frame)
                key = cv2.waitKey(delay)

                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)

    if delay or args.no_show:
        metrics.log_total()
        log_latency_per_stage(cap.reader_metrics.get_latency(),
                              async_pipeline.preprocess_metrics.get_latency(),
                              async_pipeline.inference_metrics.get_latency(),
                              async_pipeline.postprocess_metrics.get_latency(),
                              render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
Example No. 12
def main():
    args = build_argparser().parse_args()

    cap = open_images_capture(args.input, args.loop)

    if args.adapter == 'openvino':
        plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
        model_adapter = OpenvinoAdapter(create_core(), args.model, device=args.device, plugin_config=plugin_config,
                                        max_num_requests=args.num_infer_requests,
                                        model_parameters={'input_layouts': args.layout})
    elif args.adapter == 'ovms':
        model_adapter = OVMSAdapter(args.model)

    configuration = {
        'confidence_threshold': args.prob_threshold,
        'path_to_labels': args.labels,
    }
    model = get_model(model_adapter, configuration)
    model.log_layers_info()

    pipeline = AsyncPipeline(model)

    next_frame_id = 0
    next_frame_id_to_show = 0

    tracker = None
    if not args.no_track and cap.get_type() in {'VIDEO', 'CAMERA'}:
        tracker = StaticIOUTracker()
    visualizer = InstanceSegmentationVisualizer(model.labels, args.show_boxes, args.show_scores)

    metrics = PerformanceMetrics()
    render_metrics = PerformanceMetrics()
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()

    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(args.utilization_monitors, 55,
                                               (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                                                         cap.fps(), tuple(output_resolution)):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            pipeline.submit_data(frame, next_frame_id, {'frame': frame, 'start_time': start_time})
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            (scores, classes, boxes, masks), frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if args.raw_output_message:
                print_raw_results(boxes, classes, scores, next_frame_id_to_show)

            rendering_start_time = perf_counter()
            masks_tracks_ids = tracker(masks, classes) if tracker else None
            frame = visualizer(frame, boxes, classes, scores, masks, masks_tracks_ids)
            render_metrics.update(rendering_start_time)

            presenter.drawGraphs(frame)
            metrics.update(start_time, frame)

            if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Instance Segmentation results', frame)
                key = cv2.waitKey(1)
                if key in {27, ord('q'), ord('Q')}:
                    break
                presenter.handleKey(key)

    pipeline.await_all()
    if pipeline.callback_exceptions:
        raise pipeline.callback_exceptions[0]
    # Process completed requests
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        (scores, classes, boxes, masks), frame_meta = results
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        if args.raw_output_message:
            print_raw_results(boxes, classes, scores, next_frame_id_to_show)

        rendering_start_time = perf_counter()
        masks_tracks_ids = tracker(masks, classes) if tracker else None
        frame = visualizer(frame, boxes, classes, scores, masks, masks_tracks_ids)
        render_metrics.update(rendering_start_time)

        presenter.drawGraphs(frame)
        metrics.update(start_time, frame)

        if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(frame)

        if not args.no_show:
            cv2.imshow('Instance Segmentation results', frame)
            cv2.waitKey(1)

    metrics.log_total()
    log_latency_per_stage(cap.reader_metrics.get_latency(),
                          pipeline.preprocess_metrics.get_latency(),
                          pipeline.inference_metrics.get_latency(),
                          pipeline.postprocess_metrics.get_latency(),
                          render_metrics.get_latency())
    for rep in presenter.reportMeans():
        log.info(rep)
Example No. 13
def main():
    args = build_argparser().parse_args()

    paragraphs = get_paragraphs(args.input)

    vocab_start_time = perf_counter()
    vocab = load_vocab_file(args.vocab)
    log.debug("Loaded vocab file from {}, get {} tokens".format(
        args.vocab, len(vocab)))
    visualizer = Visualizer(args.colors)
    total_latency = (perf_counter() - vocab_start_time) * 1e3

    ie = create_core()
    plugin_config = get_user_config(args.device, args.num_streams,
                                    args.num_threads)
    model_emb_adapter = OpenvinoAdapter(
        ie,
        args.model_emb,
        device=args.device,
        plugin_config=plugin_config,
        max_num_requests=args.num_infer_requests)
    model_emb = BertEmbedding(model_emb_adapter, {
        'vocab': vocab,
        'input_names': args.input_names_emb
    })
    model_emb.log_layers_info()

    # reshape BertEmbedding model to infer short questions and long contexts
    max_len_context = 384
    max_len_question = 32

    for new_length in [max_len_question, max_len_context]:
        model_emb.reshape(new_length)
        if new_length == max_len_question:
            emb_exec_net = ie.load_network(model_emb_adapter.net, args.device)
        else:
            emb_pipeline = AsyncPipeline(model_emb)

    if args.model_qa:
        model_qa_adapter = OpenvinoAdapter(
            ie,
            args.model_qa,
            device=args.device,
            plugin_config=plugin_config,
            max_num_requests=args.num_infer_requests)
        config = {
            'vocab': vocab,
            'input_names': args.input_names_qa,
            'output_names': args.output_names_qa,
            'max_answer_token_num': args.max_answer_token_num,
            'squad_ver': args.model_qa_squad_ver
        }
        model_qa = BertQuestionAnswering(model_qa_adapter, config)
        model_qa.log_layers_info()
        qa_pipeline = AsyncPipeline(model_qa)

    log.info("\t\tStage 1    (Calc embeddings for the context)")
    contexts_all = []
    start_time = perf_counter()

    # get the context as a string and then encode it into a token-id list;
    # calculate the number of context tokens in each request, reserving
    # 3 positions for the special tokens: [CLS] q_tokens [SEP] c_tokens [SEP]
    if args.model_qa:
        # so the context can be passed to model_qa together with the question
        c_window_len = model_qa.max_length - (max_len_question + 3)
    else:
        # so the context can be passed to model_emb without a question
        c_window_len = max_len_context - 2
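    # e.g. in the model_qa branch, with model_qa.max_length == 384 and
    # max_len_question == 32, c_window_len == 384 - (32 + 3) == 349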

    def calc_question_embedding(tokens_id):
        num = min(max_len_question - 2, len(tokens_id))
        inputs, _ = model_emb.preprocess((tokens_id[:num], max_len_question))
        raw_result = emb_exec_net.infer(inputs)
        return model_emb.postprocess(raw_result, None)

    source = ContextSource(paragraphs, vocab, c_window_len)
    next_window_id = 0
    next_window_id_to_show = 0
    contexts_all = []

    while True:
        if emb_pipeline.callback_exceptions:
            raise emb_pipeline.callback_exceptions[0]
        results = emb_pipeline.get_result(next_window_id_to_show)
        if results:
            embedding, meta = results
            meta['c_data'].emb = embedding
            contexts_all.append(meta['c_data'])
            next_window_id_to_show += 1
            continue

        if emb_pipeline.is_ready():
            if source.is_over():
                break
            c_data = source.get_data()
            num = min(max_len_context - 2, len(c_data.c_tokens_id))
            emb_pipeline.submit_data(
                (c_data.c_tokens_id[:num], max_len_context), next_window_id,
                {'c_data': c_data})
            next_window_id += 1
        else:
            emb_pipeline.await_any()

    emb_pipeline.await_all()
    for window_id in range(next_window_id_to_show, next_window_id):
        results = emb_pipeline.get_result(window_id)
        while results is None:
            results = emb_pipeline.get_result(window_id)
        embedding, meta = results
        meta['c_data'].emb = embedding
        contexts_all.append(meta['c_data'])
        next_window_id_to_show += 1

    total_latency += (perf_counter() - start_time) * 1e3
    context_embeddings_time = total_latency

    if args.questions:

        def questions():
            for question in args.questions:
                log.info("\n\tQuestion: {}".format(question))
                yield question
    else:

        def questions():
            while True:
                yield input('\n\tType a question (empty string to exit): ')

    for question in questions():
        if not question.strip():
            break

        start_time = perf_counter()
        log.info(
            "\t\tStage 2    (Calc question embedding and compare with {} context embeddings)"
            .format(len(contexts_all)))
        q_tokens_id, _ = text_to_tokens(question.lower(), vocab)
        q_emb = calc_question_embedding(q_tokens_id)
        distances = [(np.linalg.norm(context.emb - q_emb, 2), context)
                     for context in contexts_all]
        distances.sort(key=lambda x: x[0])
        keep_num = min(args.best_n, len(distances))
        distances_filtered = distances[:keep_num]

        log.info(
            "The closest {} contexts to question filtered from {} context embeddings:"
            .format(keep_num, len(distances)))
        visualizer.show_closest_contexts(distances_filtered)

        if args.model_qa:
            answers = []
            next_context_id = 0
            next_context_id_to_show = 0

            while True:
                if qa_pipeline.callback_exceptions:
                    raise qa_pipeline.callback_exceptions[0]
                results = qa_pipeline.get_result(next_context_id_to_show)
                if results:
                    next_context_id_to_show += 1
                    output, meta = results
                    update_answers_list(answers, output, meta['c_data'])
                    continue

                if qa_pipeline.is_ready():
                    if next_context_id == len(distances_filtered):
                        break
                    _, c_data = distances_filtered[next_context_id]
                    qa_pipeline.submit_data((c_data, q_tokens_id),
                                            next_context_id,
                                            {'c_data': c_data})
                    next_context_id += 1
                else:
                    qa_pipeline.await_any()

            qa_pipeline.await_all()
            for context_id in range(next_context_id_to_show, next_context_id):
                results = qa_pipeline.get_result(context_id)
                while results is None:
                    results = qa_pipeline.get_result(context_id)
                output, meta = results
                update_answers_list(answers, output, meta['c_data'])

            log.info(
                "\t\tStage 3    (Show top 3 answers from {} closest contexts of Stage 1)"
                .format(len(answers)))
            answers = sorted(answers, key=lambda x: -x[0])[:3]
            visualizer.show_answers(answers)

        total_latency += (perf_counter() - start_time) * 1e3

    log.info("Metrics report:")
    log.info("\tContext embeddings latency (stage 1): {:.1f} ms".format(
        context_embeddings_time))
    log.info("\tLatency (all stages): {:.1f} ms".format(total_latency))