Code example #1
def build_executors(xml_file, bin_file, devices):
    threads_list = []
    image = cv2.imread("images/road.jpeg")
    dst_shape = (896, 512)
    input_images = cv2.dnn.blobFromImages([image], 1, dst_shape, swapRB=True)
    input_images_dict = {"data": input_images}

    for device in devices:
        if device == "CPU":
            plugin = IEPlugin(device)
            requests_num = benchmark_config["cpu_request_num"]
            config = {"CPU_THREADS_NUM": "0", "CPU_THROUGHPUT_STREAMS": str(requests_num)}
            plugin.add_cpu_extension(path_to_cpu_extention)
        elif device == "MYRIAD":
            plugin = IEPlugin("HDDL")
            config = {"LOG_LEVEL": "LOG_INFO",
                      "VPU_LOG_LEVEL": "LOG_INFO"}
            requests_num = benchmark_config["myriad_request_num"]
        else:
            raise ValueError('Unidentified device "{}"!'.format(device))

        plugin.set_config(config)
        ie_network = IENetwork(xml_file, bin_file)
        exe_network = plugin.load(ie_network, requests_num)
        infer_executor = InferExecutor(exe_network, input_images_dict)
        executor_thread = InferExecutorThread(device, infer_executor, running_time)
        threads_list.append(executor_thread)
    return threads_list
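
A hypothetical call site for build_executors above; the IR paths are placeholders, the module-level globals the function references (benchmark_config, path_to_cpu_extention, running_time) are assumed to be defined elsewhere in the file, and InferExecutorThread is assumed to subclass threading.Thread.

# Hypothetical usage sketch; paths are placeholders and InferExecutorThread
# is assumed to be a threading.Thread subclass.
threads = build_executors("model.xml", "model.bin", ["CPU", "MYRIAD"])
for t in threads:
    t.start()
for t in threads:
    t.join()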
Code example #2
def test_set_config(device):
    with warnings.catch_warnings(record=True) as w:
        plugin = IEPlugin("HETERO:CPU")
        plugin.set_config({"TARGET_FALLBACK": "CPU,GPU"})
    assert len(w) == 1
    assert "IEPlugin class is deprecated. " \
               "Please use IECore class instead." in str(w[0].message)
Code example #3
def load_model(feature, model_xml, device, plugin_dirs, input_key_length, output_key_length, cpu_extension):

    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    log.info("Initializing plugin for {} device...".format(device))
    plugin = IEPlugin(device, plugin_dirs)

    log.info("Loading network files for {}".format(feature))
    if cpu_extension and 'CPU' in device:
        plugin.add_cpu_extension(cpu_extension)
    else:
        plugin.set_config({"PERF_COUNT":"YES"})

    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    
    log.info("Checking {} network inputs".format(feature))
    assert len(net.inputs.keys()) == input_key_length, "Demo supports only single input topologies"
    log.info("Checking {} network outputs".format(feature))
    assert len(net.outputs) == output_key_length, "Demo supports only single output topologies"
    return plugin, net
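
A hypothetical usage sketch for load_model; the feature label, IR path, and extension path are placeholders.

# Hypothetical usage sketch; all arguments are placeholders.
plugin, net = load_model("face detection", "model.xml", "CPU", None,
                         input_key_length=1, output_key_length=1,
                         cpu_extension="libcpu_extension.so")
exec_net = plugin.load(network=net, num_requests=2)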
Code example #4
def main():
    args = build_argparser().parse_args()
    assert args.device.split(':')[0] == "HETERO", "This sample supports only Hetero Plugin. " \
                                                  "Please specify correct device, e.g. HETERO:FPGA,CPU"
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)

    layers = net.get_layers()
    net_ops = set([l['type'] for l in layers.values()])
    if not any(op in net_ops for op in ("Convolution", "Concat")):
        print("Specified IR doesn't contain any Convolution or Concat operations for which affinity going to be set.\n"
              "Try to use another topology to make the affinity setting result more visible.")

    # Configure the plugin to initialize default affinity for network in set_initial_affinity() function.
    plugin.set_config({"TARGET_FALLBACK": args.device.split(':')[1]})
    # Enable graph visualization
    plugin.set_config({"HETERO_DUMP_GRAPH_DOT": "YES"})
    plugin.set_initial_affinity(net)

    net.set_affinity(types_affinity_map={"Convolution": "GPU", "Concat": "CPU"})
    # Affinity setting example based on layer name.
    # layers_affinity_map has higher priority and will override the affinity set by layer type.
    # net.set_affinity(types_affinity_map={"Convolution": "GPU", "Concat": "CPU"},
    #                  layers_affinity_map={"fire4/expand3x3/Conv2D": "CPU"})

    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob]
    image = cv2.imread(args.input)
    image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    image = image.reshape((n, c, h, w))
    # Load network to the plugin
    exec_net = plugin.load(network=net)
    del net
    # Start sync inference
    res = exec_net.infer(inputs={input_blob: image})
    top_ind = np.argsort(res[out_blob], axis=1)[0, -args.number_top:][::-1]
    for i in top_ind:
        print("%f #%d" % (res[out_blob][0, i], i))
    del exec_net
    del plugin
    cwd = os.getcwd()
    print(
        "Graphs representing default and resulting affinities dumped to {} and {} files respectively"
            .format(os.path.join(cwd, 'hetero_affinity.dot'), os.path.join(cwd, 'hetero_subgraphs.dot'))
    )
Code example #5
File: main.py / Project: bosques-urbanos/iikim
def create_ie_plugin(device='CPU', cpu_extension=None, plugin_dir=None):
    print("Initializing plugin for {} device...".format(device))
    plugin = IEPlugin(device, plugin_dirs=plugin_dir)

    if 'MYRIAD' in device:
        myriad_config = {"VPU_HW_STAGES_OPTIMIZATION": "YES"}
        plugin.set_config(myriad_config)

    if cpu_extension and 'CPU' in device:
        plugin.add_cpu_extension(cpu_extension)

    return plugin
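
A hypothetical usage sketch for create_ie_plugin, assuming IENetwork is imported from openvino.inference_engine; the IR paths are placeholders.

# Hypothetical usage sketch; IR paths are placeholders.
plugin = create_ie_plugin(device='MYRIAD')
net = IENetwork(model='model.xml', weights='model.bin')
exec_net = plugin.load(network=net, num_requests=2)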
Code example #6
def prepare_model(log, model, weights, cpu_extension, device_list, plugin_dir,
                  thread_num):
    model_xml = model
    model_bin = weights
    if len(device_list) == 1:
        device = device_list[0]
    elif len(device_list) == 2:
        device = 'HETERO:{},{}'.format(device_list[0], device_list[1])
    else:
        log.error('Wrong number of devices')
        sys.exit(1)
    log.info('Plugin initialization.')
    plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
    if cpu_extension and 'CPU' in device:
        plugin.add_cpu_extension(cpu_extension)
    log.info('Loading network files:\n\t {0}\n\t {1}'.format(
        model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)
    if plugin.device == 'CPU':
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys()
                                if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error('Following layers are not supported by the plugin '
                      'for specified device {0}:\n {1}'.format(
                          plugin.device, ', '.join(not_supported_layers)))
            log.error('Please try to specify cpu extensions library path in '
                      'sample\'s command line parameters using -l or '
                      '--cpu_extension command line argument')
            sys.exit(1)
    if thread_num is not None:
        if 'CPU' in device_list:
            plugin.set_config({'CPU_THREADS_NUM': str(thread_num)})
        else:
            log.error('Parameter : Number of threads is used only for CPU')
            sys.exit(1)
    if len(device_list) == 2:
        plugin.set_config({'TARGET_FALLBACK': device})
        plugin.set_initial_affinity(net)
    return net, plugin
Code example #7
File: infer_IE_TF.py / Project: FionaZZ92/OpenVINO
def main_IE_infer():
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Read & resize input image
    m_input_size = 513
    image = cv2.imread(args.input)
    ratio = 1.0 * m_input_size / max(
        image.shape[0], image.shape[1])  # 513 is the model's input size
    shrink_size = (int(ratio * image.shape[1]), int(ratio * image.shape[0]))
    image = cv2.resize(image, shrink_size, interpolation=cv2.INTER_CUBIC)

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    if args.performance:
        plugin.set_config({"PERF_COUNT": "YES"})
    # Read IR
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)

    input_blob = next(iter(net.inputs))
    exec_net = plugin.load(network=net)
    for itr in range(5):
        now = time.time()
        image_ = _pre._pre_process(image)
        image_ = image_.transpose(
            (0, 3, 1, 2))  # Change data layout from NHWC to NCHW
        # Do inference
        res = exec_net.infer(inputs={input_blob: image_})
        result = _post._post_process(res['ArgMax/Squeeze'], image)[0]
        print('time cost:', time.time() - now)
        result[result > 0] = 255
        cv2.imwrite('./test_img/result_deeplabv3.jpg', result)
    del net
    del exec_net
    del plugin
Code example #8
def build_executors(xml_file, bin_file, devices, requests_num):
    threads_list = []
    image = cv2.imread("images/road.jpeg")
    dst_shape = (896, 512)
    input_images = cv2.dnn.blobFromImages([image], 1, dst_shape, swapRB=True)
    input_images_dict = {"data": input_images}

    for device in devices:
        plugin = IEPlugin(device)
        if device == "CPU":
            config = {"CPU_THREADS_NUM": "0", "CPU_THROUGHPUT_STREAMS": str(request_num)}
            plugin.add_cpu_extension(path_to_cpu_extention)
        elif device == "HDDL":
            config = {"LOG_LEVEL": "LOG_INFO",
                      "VPU_LOG_LEVEL": "LOG_INFO"}
        else:
            config = {}
        plugin.set_config(config)
        ie_network = IENetwork(xml_file, bin_file)
        exe_network = plugin.load(ie_network, requests_num)
        infer_executor = InferExecutor(exe_network, input_images_dict)
        executor_thread = InferExecutorThread(device, infer_executor)
        threads_list.append(executor_thread)
    return threads_list
Code example #9
import time

import cv2
from PIL import Image
from openvino.inference_engine import IENetwork, IEPlugin

model_xml = '/opt/intel/computer_vision_sdk_2018.4.420/deployment_tools/model_optimizer/10_lrmodels/UNet/FP16/semanticsegmentation_frozen_person_32.xml'
model_bin = '/opt/intel/computer_vision_sdk_2018.4.420/deployment_tools/model_optimizer/10_lrmodels/UNet/FP16/semanticsegmentation_frozen_person_32.bin'
net = IENetwork.from_ir(model=model_xml, weights=model_bin)
seg_image = Image.open("data/input/009649.png")
palette = seg_image.getpalette()  # Get a color palette
index_void = 2  # Index of the void (background) class
camera_width = 320
camera_height = 240
fps = ""
elapsedTime = 0

plugin = IEPlugin(device="HETERO:MYRIAD,CPU")
plugin.set_config({"TARGET_FALLBACK": "HETERO:MYRIAD,CPU"})
plugin.set_initial_affinity(net)
#plugin = IEPlugin(device="CPU")
exec_net = plugin.load(network=net)

input_blob = next(iter(net.inputs))  #input_blob = 'input'
out_blob = next(iter(net.outputs))  #out_blob   = 'output/BiasAdd'
n, c, h, w = net.inputs[input_blob].shape  #n, c, h, w = 1, 3, 256, 256

del net

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FPS, 30)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
time.sleep(1)
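
The snippet stops after configuring the camera. A per-frame loop in the style of the other examples in this section could look like the following sketch; post-processing of the U-Net output with the palette is omitted.

# Sketch of the per-frame loop the snippet builds toward, following the
# resize/transpose/reshape/infer pattern used elsewhere in this section.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    in_frame = cv2.resize(frame, (w, h))
    in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    in_frame = in_frame.reshape((n, c, h, w))
    res = exec_net.infer(inputs={input_blob: in_frame})
    seg_map = res[out_blob]  # palette-based post-processing omitted
cap.release()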
Code example #10
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)

    # Configure plugin to support dynamic batch size
    plugin.set_config({"DYN_BATCH_ENABLED": "YES"})

    # Load cpu_extensions library if specified
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    # Check for unsupported layers if the device is 'CPU'
    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        unsupported_layers = [layer for layer in net.layers if layer not in supported_layers]
        if len(unsupported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(unsupported_layers)))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))

    # Set max batch size
    inputs_count = len(args.input)
    if args.max_batch < inputs_count:
        log.warning("Defined max_batch size {} less than input images count {}."
                    "\n\t\t\tInput images count will be used as max batch size".format(args.max_batch, inputs_count))
    net.batch_size = max(args.max_batch, inputs_count)

    # Create numpy array for the max_batch size images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.zeros(shape=(n, c, h, w))

    # Read and pre-process input images
    for i in range(inputs_count):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    log.info("Batch size is {}".format(n))

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)

    def infer():
        for i in range(args.number_iter):
            t0 = time()
            exec_net.infer(inputs={input_blob: images})
            infer_time.append((time() - t0) * 1000)
        log.info("Average running time of one iteration: {} ms".format(np.average(np.asarray(infer_time))))
        if args.perf_counts:
            perf_counts = exec_net.requests[0].get_perf_counts()
            log.info("Performance counters:")
            print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status',
                                                              'real_time, us'))
            for layer, stats in perf_counts.items():
                print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
                                                                  stats['status'], stats['real_time']))

    # Start sync inference with full batch size
    log.info(
        "Starting inference with full batch {} ({} iterations)".format(n, args.number_iter)
    )
    infer_time = []
    infer()

    # Set batch size dynamically for the infer request and start sync inference
    infer_time = []
    exec_net.requests[0].set_batch(inputs_count)
    log.info("Starting inference with dynamically defined batch {} for the 2nd infer request ({} iterations)".format(
        inputs_count, args.number_iter))
    infer()
Code example #11
def set_plugin_config(plugin: IEPlugin, config: str = None):
    plugin.set_config(get_config_dictionary(config_file=config))
    return plugin
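
get_config_dictionary is not shown in this example. A plausible minimal implementation, assuming a plain-text config file with one "KEY VALUE" pair per line; the parsing format is an assumption, not the project's actual helper.

# Hypothetical helper assumed by the example above; parses "KEY VALUE" lines.
def get_config_dictionary(config_file: str = None) -> dict:
    config = {}
    if config_file:
        with open(config_file) as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith('#'):
                    key, value = line.split(maxsplit=1)
                    config[key] = value
    return config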
Code example #12
def main():
    args = parse_arguments()

    # --------------------------------- 1. Load Plugin for inference engine ---------------------------------
    logger.info("Loading plugin")
    plugin = IEPlugin(args.target_device)

    config = dict()
    if 'CPU' in args.target_device:
        if args.path_to_extension:
            plugin.add_cpu_extension(args.path_to_extension)
        if args.number_threads is not None:
            config.update({'CPU_THREADS_NUM': str(args.number_threads)})
    else:
        raise AttributeError(
            "Device {} does not support 3D convolution. Please use CPU or HETERO:*CPU*".format(
                args.target_device))

    if 'GPU' in args.target_device:
        if args.path_to_cldnn_config:
            config.update({'CONFIG_FILE': args.path_to_cldnn_config})
            logger.info("GPU extensions is loaded {}".format(
                args.path_to_cldnn_config))

    plugin.set_config(config)

    logger.info("Device is {}".format(plugin.device))
    logger.info("Plugin version is {}".format(plugin.version))

    # --------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ---------------------

    xml_filename = os.path.abspath(args.path_to_model)
    bin_filename = os.path.abspath(os.path.splitext(xml_filename)[0] + '.bin')

    ie_network = IENetwork(xml_filename, bin_filename)

    input_info = ie_network.inputs
    if len(input_info) == 0:
        raise AttributeError('No inputs info is provided')
    elif len(input_info) != 1:
        raise AttributeError("only one input layer network is supported")

    input_name = next(iter(input_info))
    out_name = next(iter(ie_network.outputs))

    if args.shape:
        logger.info("Reshape of network from {} to {}".format(
            input_info[input_name].shape, args.shape))
        ie_network.reshape({input_name: args.shape})
        input_info = ie_network.inputs

    # ---------------------------------------- 3. Preparing input data ----------------------------------------
    logger.info("Preparing inputs")

    if len(input_info[input_name].shape) != 5:
        raise AttributeError(
            "Incorrect shape {} for 3d convolution network".format(args.shape))

    n, c, d, h, w = input_info[input_name].shape
    ie_network.batch_size = n

    is_nifti_data = os.path.isdir(args.path_to_input_data)

    if is_nifti_data:
        series_name = find_series_name(args.path_to_input_data)
        original_data, data_crop, affine, original_size, bbox = \
            read_image(args.path_to_input_data, series_name=series_name, sizes=(h, w, d))

    else:
        if not (fnmatch(args.path_to_input_data, '*.tif')
                or fnmatch(args.path_to_input_data, '*.tiff')):
            raise AttributeError("Input file extension must have tiff format")

        data_crop = np.zeros(shape=(n, c, d, h, w), dtype=np.float32)
        im_seq = ImageSequence.Iterator(Image.open(args.path_to_input_data))
        for i, page in enumerate(im_seq):
            im = np.array(page).reshape(h, w, c)
            for channel in range(c):
                data_crop[:, channel, i, :, :] = im[:, :, channel]
        original_data = data_crop
        original_size = original_data.shape[-3:]

    test_im = {input_name: data_crop}

    # ------------------------------------- 4. Loading model to the plugin -------------------------------------
    logger.info("Loading model to the plugin")
    executable_network = plugin.load(network=ie_network)
    del ie_network

    # ---------------------------------------------- 5. Do inference --------------------------------------------
    logger.info("Start inference")
    start_time = datetime.now()
    res = executable_network.infer(test_im)
    infer_time = datetime.now() - start_time
    logger.info("Finish inference")
    logger.info("Inference time is {}".format(infer_time))

    # ---------------------------- 6. Processing of the received inference results ------------------------------
    result = res[out_name]
    batch, channels, out_d, out_h, out_w = result.shape

    list_img = list()
    list_seg_result = list()

    logger.info("Processing of the received inference results is started")
    start_time = datetime.now()
    for batch, data in enumerate(result):
        seg_result = np.zeros(shape=original_size, dtype=np.uint8)
        if data.shape[1:] != original_size:
            x = bbox[1] - bbox[0]
            y = bbox[3] - bbox[2]
            z = bbox[5] - bbox[4]
            seg_result[bbox[0]:bbox[1], bbox[2]:bbox[3], bbox[4]:bbox[5]] = \
                np.argmax(resample_np(data, (channels, x, y, z), 1), axis=0)
        elif channels == 1:
            seg_result = data.reshape(out_d, out_h, out_w).astype(int)
        else:
            seg_result = np.argmax(data, axis=0).astype(int)

        im = np.stack([
            original_data[batch, 0, :, :, :], original_data[batch, 0, :, :, :],
            original_data[batch, 0, :, :, :]
        ],
                      axis=3)

        im = 255 * (im - im.min()) / (im.max() - im.min())
        color_seg_frame = np.zeros(im.shape, dtype=np.uint8)
        for idx, c in enumerate(CLASSES_COLOR_MAP):
            color_seg_frame[seg_result[:, :, :] == idx, :] = np.array(
                c, dtype=np.uint8)
        mask = seg_result[:, :, :] > 0
        im[mask] = color_seg_frame[mask]

        for k in range(out_d):
            if is_nifti_data:
                list_img.append(
                    Image.fromarray(im[:, :, k, :].astype('uint8'), 'RGB'))
            else:
                list_img.append(
                    Image.fromarray(im[k, :, :, :].astype('uint8'), 'RGB'))

        if args.output_nifti and is_nifti_data:
            list_seg_result.append(seg_result)

    result_processing_time = datetime.now() - start_time
    logger.info("Processing of the received inference results is finished")
    logger.info("Processing time is {}".format(result_processing_time))

    # --------------------------------------------- 7. Save output -----------------------------------------------
    tiff_output_name = os.path.join(args.path_to_output, 'output.tiff')
    Image.new('RGB',
              (data.shape[3], data.shape[2])).save(tiff_output_name,
                                                   append_images=list_img,
                                                   save_all=True)
    logger.info("Result tiff file was saved to {}".format(tiff_output_name))

    if args.output_nifti and is_nifti_data:
        for seg_res in list_seg_result:
            nii_filename = os.path.join(
                args.path_to_output,
                'output_{}.nii.gz'.format(list_seg_result.index(seg_res)))
            nib.save(nib.Nifti1Image(seg_res, affine=affine), nii_filename)
            logger.info(
                "Result nifti file was saved to {}".format(nii_filename))
Code example #13
File: benchmark.py / Project: gerainthub13/learnAI
def main(args=None):
    try:
        if args is None:
            args = parse_args()

        validate_args(args)

        # --------------------------------- 1. Load Plugin for inference engine ---------------------------------
        logging.info("Loading plugin")
        plugin = IEPlugin(args.target_device)

        config = dict()
        if CPU_DEVICE_NAME in args.target_device:
            if args.path_to_extension:
                plugin.add_cpu_extension(args.path_to_extension)
            # limit threading for CPU portion of inference
            if args.number_threads is not None:
                config.update({'CPU_THREADS_NUM': str(args.number_threads)})
            # pin threads for CPU portion of inference
            config.update({'CPU_BIND_THREAD': args.infer_threads_pinning})
            # for pure CPU execution, more throughput-oriented execution via streams
            if args.api_type == 'async' and CPU_DEVICE_NAME in args.target_device:
                config.update({'CPU_THROUGHPUT_STREAMS': str(args.number_infer_requests)})
        elif GPU_DEVICE_NAME in args.target_device:
            if args.path_to_cldnn_config:
                config.update({'CONFIG_FILE': args.path_to_cldnn_config})
                logger.info("GPU extensions is loaded {}".format(args.path_to_cldnn_config))
        elif MYRIAD_DEVICE_NAME in args.target_device:
            config.update({'LOG_LEVEL': 'LOG_INFO'})
            config.update({'VPU_LOG_LEVEL': 'LOG_INFO'})

        plugin.set_config(config)

        logger.info("Device is {}".format(plugin.device))
        logger.info("Plugin version is {}".format(plugin.version))

        # --------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ---------------------
        logger.info("Loading network files")

        xml_filename = os.path.abspath(args.path_to_model)
        head, tail = os.path.splitext(xml_filename)
        bin_filename = os.path.abspath(head + BIN_EXTENSION)

        ie_network = IENetwork(xml_filename, bin_filename)

        input_info = ie_network.inputs

        if len(input_info) == 0:
            raise AttributeError('No inputs info is provided')
        elif len(input_info) != 1:
            raise AttributeError("only one input layer network is supported")

        # -------------------------------------- 3. Change network batch_size  -------------------------------------
        batch_size = ie_network.batch_size
        key = list(input_info.keys()).pop()
        precision = input_info[key].precision

        if args.batch_size and args.batch_size != ie_network.batch_size:
            # deepcopy input_info
            shape = input_info[key].shape
            # We support models having only one input layers
            if input_info[key].layout != LAYOUT_TYPE:
                raise Exception('Unsupported model for batch size changing in automatic mode')
            shape[BATCH_SIZE_ELEM] = args.batch_size
            ie_network.reshape({key: shape})

            input_info = ie_network.inputs

            batch_size = args.batch_size


        logger_message = "Network batch size was changed to: " if args.batch_size is not None else "Network batch size: "
        logger_message += " {}, precision: {}".format(batch_size, precision)
        logger.info(logger_message)

        # ------------------------------------- 4. Loading model to the plugin -------------------------------------
        logger.info("Loading model to the plugin")
        exe_network = plugin.load(ie_network, args.number_infer_requests)

        # ------------------------------------ 5. Performance measurements stuff -----------------------------------
        inputs = get_images(os.path.abspath(args.path_to_images), batch_size)

        if batch_size < len(inputs):
            logger.warn("Network batch size {} is less then images count  {}"
                        ", some input files will be ignored".format(batch_size, len(inputs)))

        input_images = {key: fill_blob_with_image(inputs, input_info[key].shape)}

        times = list()
        duration = 0

        if args.number_iterations is None:
            duration = get_duration_in_secs(args.target_device)

        if args.api_type == 'sync':

            # warming up - out of scope
            exe_network.infer(input_images)

            if args.number_iterations is not None:
                logger.info(
                    "Start inference synchronously ({}) sync inference executions".format(args.number_iterations))
                for iteration in range(args.number_iterations):
                    sync_infer_request(exe_network, times, input_images)

            else:
                logger.info("Start inference synchronously ({} s duration)".format(duration))
                start_time = datetime.now()
                current_time = start_time
                while (current_time - start_time).total_seconds() < duration:
                    current_time = sync_infer_request(exe_network, times, input_images)

            times.sort()
            latency = median(times)
            fps = batch_size / latency

            print("[BENCHMARK RESULT] Latency is {:.4f} msec".format(latency * 1e3))
            print("[BENCHMARK RESULT] Throughput is {:.4f} FPS".format(fps))
        else:
            infer_requests = exe_network.requests

            if args.number_iterations is not None:
                logger.info("Start inference asynchronously ({}"
                            " async inference executions, {} "
                            " inference requests in parallel".format(args.number_iterations,
                                                                       args.number_infer_requests))
            else:
                logger.info("Start inference asynchronously ({} s duration, "
                            "{} inference requests in parallel)".format(duration, args.number_infer_requests))

            current_inference = 0
            required_inference_requests_were_executed = False
            previous_inference = 1 - args.number_infer_requests
            step = 0
            steps_count = args.number_infer_requests - 1
            if args.number_iterations is not None:
                steps_count += args.number_iterations

            # warming up - out of scope
            infer_requests[0].async_infer(input_images)
            infer_requests[0].wait()

            start_time = datetime.now()
            while not required_inference_requests_were_executed or step < steps_count or \
                    args.number_iterations is None and (datetime.now() - start_time).total_seconds() < duration:
                exe_network.start_async(current_inference, input_images)

                if previous_inference >= 0:
                    status = infer_requests[previous_inference].wait()
                    if status != 0:
                        raise Exception("Infer request not completed successfully")

                current_inference += 1
                if current_inference >= args.number_infer_requests:
                    current_inference = 0
                    required_inference_requests_were_executed = True

                previous_inference += 1
                if previous_inference >= args.number_infer_requests:
                    previous_inference = 0

                step += 1

            # wait the latest inference executions
            for not_completed_index in range(args.number_infer_requests):
                if infer_requests[not_completed_index].wait(0) != 0:
                    infer_requests[not_completed_index].wait()

            total_duration = (datetime.now() - start_time).total_seconds()
            fps = batch_size * step / total_duration

            print("[BENCHMARK RESULT] Throughput is {:.4f} FPS".format(fps))

        del exe_network
        del plugin

    except Exception as e:
        logging.exception(e)
Code example #14
def main():
    args = parse_arguments()

    # --------------------------------- 1. Load Plugin for inference engine ---------------------------------
    logger.info("Loading plugin")
    plugin = IEPlugin(args.target_device)

    config = dict()
    if 'CPU' in args.target_device:
        if args.path_to_extension:
            plugin.add_cpu_extension(args.path_to_extension)
        if args.number_threads is not None:
            config.update({'CPU_THREADS_NUM': str(args.number_threads)})
    else:
        raise AttributeError("Device {} do not support of 3D convolution. Please use CPU or HETERO:*CPU*")

    if 'GPU' in args.target_device:
        if args.path_to_cldnn_config:
            config.update({'CONFIG_FILE': args.path_to_cldnn_config})
            logger.info("GPU extensions is loaded %s", args.path_to_cldnn_config)

    plugin.set_config(config)

    logger.info("Device is %s ", plugin.device)
    logger.info("Plugin version is %s", plugin.version)

    # --------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ---------------------

    xml_filename = os.path.abspath(args.path_to_model)
    bin_filename = os.path.abspath(os.path.splitext(xml_filename)[0] + '.bin')

    ie_network = IENetwork(xml_filename, bin_filename)

    input_info = ie_network.inputs
    if not input_info:
        raise AttributeError("No inputs info is provided")
    if len(input_info) != 1:
        raise AttributeError("Only one input layer network is supported")

    input_name = next(iter(input_info))
    out_name = next(iter(ie_network.outputs))
    print(input_name, out_name)

    # ---------------------------------------- 3. Preparing input data ----------------------------------------
    logger.info("Preparing inputs")

    if len(input_info[input_name].shape) != 5:
        raise AttributeError("Incorrect shape {} for 3d convolution network".format(args.shape))

    n, _, d, h, w = input_info[input_name].shape
    ie_network.batch_size = n

    # ------------------------------------- 4. Loading model to the plugin -------------------------------------
    # logger.info("Reshape of network from {} to {}".format(input_info[input_name].shape, image_crop_pad.shape))
    #ie_network.reshape({input_name: image_crop_pad.shape})
    #input_info = ie_network.inputs

    # logger.info("Loading model to the plugin")
    executable_network = plugin.load(network=ie_network)
    del ie_network

    files = os.listdir(args.path_to_input_data)
    files = [f for f in files if (f.startswith('Patient') and os.path.isfile(os.path.join(args.path_to_input_data, f)))]
    files.sort()

    for f in files:
        header = read_nii_header(os.path.join(args.path_to_input_data, f))
        image = np.array(header.get_data()).astype(np.float32)
        original_shape = image.shape

        start_time = datetime.now()

        image = median_filter(image, 3)

        bbox = lung_bbox(image)

        image_crop = image[bbox[0, 0]:bbox[1, 0], bbox[0, 1]:bbox[1, 1], bbox[0, 2]:bbox[1, 2]]

        new_shape_pad = (d, h, w)
        diff = np.array(new_shape_pad) - np.array(image_crop.shape)
        pad_left = diff // 2
        pad_right = diff - pad_left

        image_crop_pad = np.pad(image_crop, pad_width=tuple([(pad_left[i], pad_right[i]) for i in range(3)]),
                                mode='reflect')

        # dataset statistics
        mean = -303.0502877950004
        mean2 = 289439.0029958802
        std = np.sqrt(mean2 - mean * mean)

        image_crop_pad = (image_crop_pad - mean) / std

        image_crop_pad = image_crop_pad[None, None]

        preprocess_time = datetime.now() - start_time

        test_im = {input_name: image_crop_pad}

        # ---------------------------------------------- 5. Do inference --------------------------------------------
        start_time = datetime.now()
        res = executable_network.infer(test_im)
        infer_time = datetime.now() - start_time

        # ---------------------------- 6. Processing of the received inference results ------------------------------
        result = res[out_name]

        start_time = datetime.now()

        output_crop = result[0, :, pad_left[0]:-pad_right[0] or None, pad_left[1]:-pad_right[1] or None,
                             pad_left[2]:-pad_right[2] or None]

        new_label = np.zeros(shape=(4,) + image.shape)
        new_label[:, bbox[0, 0]:bbox[1, 0], bbox[0, 1]:bbox[1, 1], bbox[0, 2]:bbox[1, 2]] = output_crop

        scale_factor = np.array(original_shape) / np.array(image.shape)
        old_labels = [zoom(new_label[i], scale_factor, order=1, mode='constant', cval=0)[None] for i in range(4)]

        old_label = np.concatenate(tuple(old_labels), axis=0)
        old_label = ((np.argmax(old_label, axis=0) + 1) *
                     np.max((old_label > np.array([0.5, 0.5, 0.5, 0.5]).reshape((-1, 1, 1, 1))).astype(np.int32),
                            axis=0)).astype(np.int32)

        eso_connectivity = morphology.label(old_label == 1)
        heart_connectivity = morphology.label(old_label == 2)
        trachea_connectivity = morphology.label(old_label == 3)
        aorta_connectivity = morphology.label(old_label == 4)
        eso_connectivity = reject_small_regions(eso_connectivity, ratio=0.2)
        heart_connectivity = leave_biggest_region(heart_connectivity)
        trachea_connectivity = leave_biggest_region(trachea_connectivity)
        aorta_connectivity = leave_biggest_region(aorta_connectivity)

        old_label[np.logical_and(old_label == 1, eso_connectivity == 0)] = 0
        old_label[np.logical_and(old_label == 2, heart_connectivity == 0)] = 0
        old_label[np.logical_and(old_label == 3, trachea_connectivity == 0)] = 0
        old_label[np.logical_and(old_label == 4, aorta_connectivity == 0)] = 0

        postprocess_time = datetime.now() - start_time

        logger.info("Pre-processing time is %s; Inference time is %s; Post-processing time is %s",
                    preprocess_time, infer_time, postprocess_time)

        # --------------------------------------------- 7. Save output -----------------------------------------------
        output_header = nii.Nifti1Image(old_label, header.affine)
        nii.save(output_header, os.path.join(args.path_to_output, f[:-7]+'.nii'))
Code example #15
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    assert args.device.split(':')[0] == "HETERO", "This demo supports only Hetero Plugin. " \
                                                  "Please specify correct device, e.g. HETERO:FPGA,CPU"
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    net_ops = set([l.type for l in net.layers.values()])
    if not any([op == "Convolution" for op in net_ops]):
        log.warning("Specified IR doesn't contain any Convolution operations for which affinity going to be set.\n"
                    "Try to use another topology to make the affinity setting result more visible.")

    # Configure the plugin to initialize default affinity for network in set_initial_affinity() function.
    plugin.set_config({"TARGET_FALLBACK": args.device.split(':')[1]})
    # Enable graph visualization
    plugin.set_config({"HETERO_DUMP_GRAPH_DOT": "YES"})
    plugin.set_initial_affinity(net)

    for l in net.layers.values():
        if l.type == "Convolution":
            l.affinity = "GPU"

    assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    image = cv2.imread(args.input)
    image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    image = image.reshape((n, c, h, w))
    # Load network to the plugin
    exec_net = plugin.load(network=net)
    del net
    # Start sync inference
    res = exec_net.infer(inputs={input_blob: image})
    top_ind = np.argsort(res[out_blob], axis=1)[0, -args.number_top:][::-1]
    for i in top_ind:
        log.info("%f #%d" % (res[out_blob][0, i], i))
    del exec_net
    del plugin
    cwd = os.getcwd()
    log.info(
        "Graphs representing default and resulting affinities dumped to {} and {} files respectively"
        .format(os.path.join(cwd, 'hetero_affinity.dot'), os.path.join(cwd, 'hetero_subgraphs.dot'))
    )
Code example #16
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    else:
        plugin.set_config({"PERF_COUNT": "YES"})
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(args.input)

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(
                args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    log.info("Batch size is {}".format(n))

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net, num_requests=2)

    del net
    # Start sync inference
    log.info("Starting inference ({} iterations)".format(args.number_iter))
    infer_time = []
    for i in range(args.number_iter):
        t0 = time()
        res = exec_net.infer(inputs={input_blob: images})
        infer_time.append((time() - t0) * 1000)
    # Processing output blob
    log.info("Processing output blob")
    res = res[out_blob]
    log.info("Top {} results: ".format(args.number_top))
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
    else:
        labels_map = None
    for i, probs in enumerate(res):
        probs = np.squeeze(probs)
        top_ind = np.argsort(probs)[-args.number_top:][::-1]
        for class_id in top_ind:
            det_label = labels_map[class_id] if labels_map else "#{}".format(class_id)

            print("{:<5}{:.7f} label {}".format(class_id, probs[class_id], det_label))
        print("\n")
    total_inference = np.sum(np.asarray(infer_time))
    log.info("Average running time of one iteration: {:.2f} ms".format(
        np.average(np.asarray(infer_time))))
    log.info(
        "total running time of inference: {:.2f} ms".format(total_inference))
    log.info("Throughput: {:.2f} FPS".format(
        (1000 * args.number_iter * n) / total_inference))
    print("\n")

    #printing performance counts

    if args.perf_counts:
        perf_counts = exec_net.requests[0].get_perf_counts()
        print("performance counts:\n")
        total = 0
        for layer, stats in perf_counts.items():
            total += stats['real_time']
            print(
                "{:<40} {:<15} {:<10} {:<15} {:<8} {:<5} {:<5} {:<5} {:<10} {:<15}"
                .format(layer, stats['status'], 'layerType:',
                        stats['layer_type'], 'realTime:', stats['real_time'],
                        'cpu:', stats['cpu_time'], 'execType:',
                        stats['exec_type']))
        print("{:<20} {:<7} {:<20}".format('TotalTime:', total,
                                           'microseconds'))
    log.info("Execution successful")

    del exec_net
    del plugin
Code example #17
class OpenVinoInferencer:

    # ===================================== __INIT__ =====================================
    def __init__(self, id, xmlModels, noReq=2, verbose=False, mode='MYRIAD'):
        """NCS Inferencer
        
        Arguments:
            id {int} -- id of the Movidius stick (e.g. 1, 2, 3, etc.)
            xmlModels {list(Str)} -- a list of xml models of the OpenVino networks
        
        Keyword Arguments:
            noReq {int} -- the number of requests per device (threads OpenVINO makes)
                            (default: {2})
            verbose {bool} -- verbose for NCS platform (default: {False})
        """

        assert noReq >= 2, 'The number of requests should be at least 2'

        self._id = id
        self.nets = []
        self.execNets = []
        self.noReq = noReq
        self.mode = mode
        self._load_models(xmlModels, noReq, verbose)

        self.inBlobs = [next(iter(net.inputs)) for net in self.nets]
        # self.outBlobs   = [next(iter(net.outputs)) for net in self.nets]
        self.outBlobs = []
        for net in self.nets:
            for out in net.outputs:
                self.outBlobs.append(out)

        # ids of requests for running multithreaded
        self.cReqIds = [0 for _ in range(len(self.nets))]
        self.nextReqIds = [1 for _ in range(len(self.nets))]

    # ===================================== LOAD MODEL ===================================
    def _load_models(self, xmlModels, noReq, verbose):
        """Create an OpenVino plugin and load the .xml models onto the NCS device
        
        Arguments:
            xmlModels {list(str)} -- list of OpenVino XML models
            noReq {int} -- the number of requests per network
            verbose {bool} -- verbose NCS device
        """

        # Plugin initialization for specified device and load extensions library
        if self.mode == 'MYRIAD':
            self.plugin = IEPlugin(device='MYRIAD')

            if verbose:
                self.plugin.set_config({"VPU_LOG_LEVEL": "LOG_DEBUG"})

        elif self.mode == 'CPU':
            self.plugin = IEPlugin(device='CPU')

        elif self.mode == 'GPU':
            self.plugin = IEPlugin(device='GPU')

        for xmlModel in xmlModels:
            modelBin = os.path.splitext(xmlModel)[0] + ".bin"
            net = IENetwork(model=xmlModel, weights=modelBin)

            print('loaded', net)
            self.nets.append(net)
            self.execNets.append(
                self.plugin.load(network=net, num_requests=self.noReq))

    # ===================================== PREDICT ASYNC ================================
    def predict_async(self, image, netNo):
        """Predict on ncs device
        
        Arguments:
            image {mat} -- image
            netNo {int} -- the number of network that will make the inference
        """

        self.execNets[netNo].start_async(request_id=self.nextReqIds[netNo],
                                         inputs={self.inBlobs[netNo]: image})

        res = None

        # get return status from OpenVino Doc ( 0 means OK)
        if self.execNets[netNo].requests[self.cReqIds[netNo]].wait(-1) == 0:
            res = (self.execNets[netNo]
                   .requests[self.cReqIds[netNo]]
                   .outputs[self.outBlobs[netNo]])


        self.cReqIds[netNo] = (self.cReqIds[netNo] + 1
                               if self.cReqIds[netNo] < self.noReq - 1 else 0)

        self.nextReqIds[netNo] = (self.nextReqIds[netNo] + 1
                                  if self.nextReqIds[netNo] < self.noReq - 1 else 0)

        return res

    #================================ PRED SYNC ==========================================
    def predict_sync(self, image, netNo, resNo=None):
        if not resNo:
            resNo = [netNo]
        res = self.execNets[netNo].infer({self.inBlobs[netNo]: image})
        fRes = []

        for no in resNo:
            fRes.append(res[self.outBlobs[no]])

        return fRes
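
A hypothetical usage sketch for OpenVinoInferencer; the model path is a placeholder and image is assumed to be preprocessed to the first network's NCHW input shape.

# Hypothetical usage sketch; the model path is a placeholder and `image`
# is assumed to already match the network's NCHW input shape.
inferencer = OpenVinoInferencer(id=1, xmlModels=['model.xml'], noReq=2, mode='CPU')
results = inferencer.predict_sync(image, netNo=0)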
Code example #18
def get_openvino_plugin(openvino_network, inference_platform, library_path, cpu_libpath):
    """
    Method used to load IEPlugin according to given target platform
    :param openvino_network: IENetwork object
    :param inference_platform: Target Device Plugin name (CPU, GPU, HETERO:MYRIAD,GPU,CPU, etc.)
    :param library_path: Lib path to Shared Libraries /opt/intel/openvino/deployment_tools/inference_engine/lib/ubuntu..
    :param cpu_libpath: Path to the CPU extensions shared library
    :return: IEPlugin object
    """
    openvino_plugin = None

    # If OpenVINO Selected, Check for Hardware (GPU, MYRIAD or CPU) is supported for this example
    # Load corresponding device library from the indicated paths, this application requires the environment
    # variables are already set correctly
    # source /opt/intel/openvino/bin/setupvars.sh
    if inference_platform == 'GPU':
        print('Trying to Load OpenVINO GPU Plugin')
        openvino_plugin = IEPlugin(device=inference_platform, plugin_dirs=library_path)
    elif inference_platform == 'MYRIAD':
        print('Trying to Load OpenVINO Myriad Plugin')
        openvino_plugin = IEPlugin(device=inference_platform, plugin_dirs=library_path)
        openvino_plugin.set_config({"VPU_FORCE_RESET": "NO"})
    elif inference_platform == 'HETERO:CPU,GPU' or inference_platform == 'HETERO:GPU,CPU':
        openvino_plugin = IEPlugin(device=inference_platform, plugin_dirs=library_path)
        openvino_plugin.add_cpu_extension(cpu_libpath)
        openvino_plugin.set_config({"TARGET_FALLBACK": inference_platform.split(':')[1]})
        # Enable graph visualization
        # openvino_plugin.set_config({"HETERO_DUMP_GRAPH_DOT": "YES"})
        openvino_plugin.set_initial_affinity(openvino_network)
        supported_layers = openvino_plugin.get_supported_layers(openvino_network)
        print('Supported Layers')
        # [print(layer) for layer in supported_layers]
        not_supported_layers = [l for l in openvino_network.layers.keys() if l not in supported_layers]
        print('Unsupported Layers')
        # [print(layer) for layer in not_supported_layers]
    elif inference_platform == 'HETERO:MYRIAD,GPU' or inference_platform == 'HETERO:GPU,MYRIAD':
        openvino_plugin = IEPlugin(device=inference_platform, plugin_dirs=library_path)
        openvino_plugin.set_config({"TARGET_FALLBACK": inference_platform.split(':')[1]})
        # Enable graph visualization
        # openvino_plugin.set_config({"HETERO_DUMP_GRAPH_DOT": "YES"})
        openvino_plugin.set_initial_affinity(openvino_network)
        supported_layers = openvino_plugin.get_supported_layers(openvino_network)
        print('Supported Layers')
        # [print(layer) for layer in supported_layers]
        not_supported_layers = [l for l in openvino_network.layers.keys() if l not in supported_layers]
        print('Unsupported Layers')
        # [print(layer) for layer in not_supported_layers]
    elif inference_platform == 'HETERO:MYRIAD,CPU' or inference_platform == 'HETERO:CPU,MYRIAD':
        openvino_plugin = IEPlugin(device=inference_platform, plugin_dirs=library_path)
        openvino_plugin.add_cpu_extension(cpu_libpath)
        openvino_plugin.set_config({"TARGET_FALLBACK": inference_platform.split(':')[1]})
        # Enable graph visualization
        # openvino_plugin.set_config({"HETERO_DUMP_GRAPH_DOT": "YES"})
        openvino_plugin.set_initial_affinity(openvino_network)
        supported_layers = openvino_plugin.get_supported_layers(openvino_network)
        print('Supported Layers')
        # [print(layer) for layer in supported_layers]
        not_supported_layers = [l for l in openvino_network.layers.keys() if l not in supported_layers]
        print('Unsupported Layers')
        # [print(layer) for layer in not_supported_layers]
    elif inference_platform == "CPU":
        # By default try to load CPU library
        print('Trying to Load OpenVINO CPU Plugin')
        openvino_plugin = IEPlugin(device=inference_platform, plugin_dirs=library_path)
        openvino_plugin.add_cpu_extension(cpu_libpath)
    else:
        print('Undefined Target Platform for OpenVINO: {}'.format(inference_platform))
        help_menu()
        exit(-2)

    return openvino_plugin


def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))

    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    plugin2 = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)

    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    log.info("Reading IR...")
    net = IENetwork(
        model=
        '/opt/intel/computer_vision_sdk_2018.5.445/deployment_tools/intel_models/person-vehicle-bike-detection-crossroad-0078/FP16/person-vehicle-bike-detection-crossroad-0078.xml',
        weights=
        '/opt/intel/computer_vision_sdk_2018.5.445/deployment_tools/intel_models/person-vehicle-bike-detection-crossroad-0078/FP16/person-vehicle-bike-detection-crossroad-0078.bin'
    )
    net2 = IENetwork(
        model=
        '/opt/intel/computer_vision_sdk_2018.5.445/deployment_tools/intel_models/person-vehicle-bike-detection-crossroad-0078/FP16/person-vehicle-bike-detection-crossroad-0078_.xml',
        weights=
        '/opt/intel/computer_vision_sdk_2018.5.445/deployment_tools/intel_models/person-vehicle-bike-detection-crossroad-0078/FP16/person-vehicle-bike-detection-crossroad-0078_.bin'
    )
    #net = IENetwork(model=model_xml, weights=model_bin)
    #net2 = IENetwork(model=model_xml, weights=model_bin)

    cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
    #cv2.setWindowProperty("window",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)

    # if plugin.device == "CPU":
    #     supported_layers = plugin.get_supported_layers(net)
    #     not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    #     if len(not_supported_layers) != 0:
    #         log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
    #                   format(plugin.device, ', '.join(not_supported_layers)))
    #         log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
    #                   "or --cpu_extension command line argument")
    #sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    input_blob2 = next(iter(net2.inputs))
    out_blob2 = next(iter(net2.outputs))

    log.info("Loading IR to the plugin...")

    config = dict()
    config.update({'LOG_LEVEL': 'LOG_INFO'})
    config.update({'VPU_LOG_LEVEL': 'LOG_INFO'})
    config.update({"VPU_FORCE_RESET": "NO"})
    plugin.set_config(config)
    plugin2.set_config(config)

    max_request = 2
    exec_net = plugin.load(network=net, num_requests=max_request)
    exec_net2 = plugin2.load(network=net2, num_requests=max_request)
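    # Each executable network is created with max_request (= 2) infer requests,
    # letting the loop below start one request while waiting on the other.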

    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    n2, c2, h2, w2 = net2.inputs[input_blob2].shape
    del net
    del net2
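    # The IENetwork objects are no longer needed once the networks are loaded
    # onto the device, so release them to free host memory.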

    if args.input == 'cam':
        input_stream = 0
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cap = cv2.VideoCapture(input_stream)

    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the demo execution press Esc button")
    #is_async_mode = False
    render_time = 0
    ret, frame = cap.read()
    ret2, frame2 = cap.read()

    frame_count = 1
    while cap.isOpened():
        try:
            start = time.time()
            #if is_async_mode:
            ret, next_frame = cap.read()
            ret2, next_frame2 = cap.read()

            print('finished grabbing both frames')
            #else:
            #ret, frame = cap.read()
            if not ret or not ret2:
                break
            initial_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
            initial_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            # Main sync point:
            # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
            # in the regular mode we start the CURRENT request and immediately wait for its completion
            #inf_start = time.time()
            #if is_async_mode:
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=next_request_id,
                                 inputs={input_blob: in_frame})
            print('finished start_async for network 1')

            in_frame2 = cv2.resize(next_frame2, (w2, h2))
            in_frame2 = in_frame2.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame2 = in_frame2.reshape((n2, c2, h2, w2))
            exec_net2.start_async(request_id=next_request_id,
                                  inputs={input_blob2: in_frame2})
            print('finished start_async for network 2')

            # else:
            #     in_frame = cv2.resize(frame, (w, h))
            #     in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
            #     in_frame = in_frame.reshape((n, c, h, w))
            #     exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})

            frame_count += 1
            print("This round cur_request_id is", cur_request_id)
            if frame_count % 2 == 0 and exec_net.requests[cur_request_id].wait(-1) == 0:
                print("1 has result")

                # Parse detection results of the current request
                res = exec_net.requests[cur_request_id].outputs[out_blob]
                for obj in res[0][0]:
                    # Draw only objects when probability more than specified threshold
                    if obj[2] > args.prob_threshold:
                        xmin = int(obj[3] * initial_w)
                        ymin = int(obj[4] * initial_h)
                        xmax = int(obj[5] * initial_w)
                        ymax = int(obj[6] * initial_h)
                        class_id = int(obj[1])

                        # condition for human detection
                        #if xmax-xmin < 200 and ymax-ymin < 200 and (ymax-ymin)/(xmax-xmin)>=2 :
                        # Draw box and label/class_id
                        #color = (min(class_id * 12.5, 255), min(class_id * 7, 255), min(class_id * 5, 255))
                        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                      (0, 255, 0), 2)
                        det_label = labels_map[
                            class_id] if labels_map else str(class_id)
                        cv2.putText(
                            frame, det_label + ' ' +
                            str(round(obj[2] * 100, 1)) + ' %',
                            (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6,
                            (0, 255, 0), 1)

                # Draw performance stats
                # inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
                #     "Inference time: {:.3f} ms".format(det_time * 1000)
                render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                    render_time * 1000)
                # async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                #     "Async mode is off. Processing request {}".format(cur_request_id)

                #cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
                cv2.putText(frame, render_time_message, (15, 30),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
                # cv2.putText(frame, async_mode_message, (10, int(initial_h - 20)), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                #             (10, 10, 200), 1)

            elif exec_net2.requests[cur_request_id].wait(-1) == 0:
                print("2 has result")

                # Parse detection results of the current request
                res = exec_net2.requests[cur_request_id].outputs[out_blob2]
                for obj in res[0][0]:
                    # Draw only objects when probability more than specified threshold
                    if obj[2] > args.prob_threshold:
                        xmin = int(obj[3] * initial_w)
                        ymin = int(obj[4] * initial_h)
                        xmax = int(obj[5] * initial_w)
                        ymax = int(obj[6] * initial_h)
                        class_id = int(obj[1])

                        # condition for human detection
                        #if xmax-xmin < 200 and ymax-ymin < 200 and (ymax-ymin)/(xmax-xmin)>=2 :
                        # Draw box and label/class_id
                        #color = (min(class_id * 12.5, 255), min(class_id * 7, 255), min(class_id * 5, 255))
                        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                      (0, 255, 0), 2)
                        det_label = labels_map[
                            class_id] if labels_map else str(class_id)
                        cv2.putText(
                            frame, det_label + ' ' +
                            str(round(obj[2] * 100, 1)) + ' %',
                            (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6,
                            (0, 255, 0), 1)

                # Draw performance stats
                # inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
                #     "Inference time: {:.3f} ms".format(det_time * 1000)
                render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                    render_time * 1000)
                # async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                #     "Async mode is off. Processing request {}".format(cur_request_id)

                #cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
                cv2.putText(frame, render_time_message, (15, 30),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
                # cv2.putText(frame, async_mode_message, (10, int(initial_h - 20)), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                #             (10, 10, 200), 1)

            render_start = time.time()

            cv2.imshow("window", frame)
            render_end = time.time()
            render_time = render_end - render_start

            # swap cur and next request ids for the next iteration
            cur_request_id = (cur_request_id + 1) % max_request
            next_request_id = (next_request_id + 1) % max_request
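            # With max_request == 2 the ids alternate every frame, so the
            # request started above is the one waited on in the next pass.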
            frame = next_frame  # frame is for display only
            frame2 = next_frame2  # frame2 is kept in step, though only frame is displayed

            key = cv2.waitKey(1)
            if key == 27:
                break
            # if (9 == key):
            #     is_async_mode = not is_async_mode
            #     log.info("Switched to {} mode".format("async" if is_async_mode else "sync"))

            end = time.time()
            ttime = end - start
            ttime_message = "total time: {:.3f} ms".format(ttime * 1000)
            print(ttime_message)
            print("=================================")

        except Exception:
            print("Unexpected error:", sys.exc_info()[0])

    cv2.destroyAllWindows()
    del exec_net
    del exec_net2
    del plugin
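
If the script is meant to run standalone, main() still has to be invoked; the conventional entry-point guard below is our assumption about how the file ends, since the listing stops here:

if __name__ == '__main__':
    main()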