コード例 #1
0
def init_openvino():
    """Create an OpenVINO plugin from module globals, load the CNN IR and
    return (exec_net, input_blob_name, output_blob_name).

    Reads the module-level globals CNN_ov_file_name, device and
    cpu_extensions.
    """
    model_xml = CNN_ov_file_name
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    plugin = IEPlugin(device=device)
    # The CPU plugin needs custom-layer extension libraries loaded explicitly.
    if plugin.device == "CPU":
        for extension in cpu_extensions:
            plugin.add_cpu_extension(extension)

    network = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        # Fail fast when the IR contains layers the CPU plugin cannot run.
        supported = plugin.get_supported_layers(network)
        unsupported = [name for name in network.layers.keys()
                       if name not in supported]
        if unsupported:
            print(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(unsupported)),
                file=sys.stderr)
            print(
                "Please try to specify cpu extensions library path in demo's command line parameters using -l "
                "or --cpu_extension command line argument",
                file=sys.stderr)
            sys.exit(1)

    first_input = next(iter(network.inputs))
    first_output = next(iter(network.outputs))
    exec_net = plugin.load(network=network, num_requests=1)
    del network
    return exec_net, first_input, first_output
コード例 #2
0
    def ArrangeNetwork(self):
        """Load the IR network into an OpenVINO plugin and warm it up.

        Reads self.device, self.cpu_extension, self.model_xml and
        self.model_bin; sets self.input_blob, self.input_size and
        self.exec_net. Ends with a single dummy inference so the first
        real call is not slowed by lazy initialisation.
        """
        log.basicConfig(format="[ %(levelname)s ] %(message)s",
                        level=log.INFO,
                        stream=sys.stdout)
        # Plugin initialization for specified device and load extensions library if specified
        plugin = IEPlugin(device=self.device)
        if self.cpu_extension and 'CPU' in self.device:
            plugin.add_cpu_extension(self.cpu_extension)

        # Read IR
        net = IENetwork.from_ir(model=self.model_xml, weights=self.model_bin)
        assert len(net.inputs.keys()
                   ) == 1, "Sample supports only single input topologies"
        assert len(
            net.outputs) == 1, "Sample supports only single output topologies"
        self.input_blob = next(iter(net.inputs))

        # Set input size: spatial H x W taken from the NCHW input shape
        self.input_size = net.inputs[self.input_blob].shape[2:]

        # Load network to the plugin
        self.exec_net = plugin.load(network=net)
        del net
        # Warmup with last image to avoid caching
        self.exec_net.infer(
            inputs={
                self.input_blob:
                np.zeros((1, 3, self.input_size[0], self.input_size[1]))
            })
コード例 #3
0
    def __init__(
        self,
        yolo_model_xml,
        yolo_model_bin,
        cpu_extension_file,
    ):
        """Load a YOLO IR network on the CPU plugin.

        :param yolo_model_xml: path to the IR topology (.xml)
        :param yolo_model_bin: path to the IR weights (.bin)
        :param cpu_extension_file: CPU extension library to load
        """
        self.num_cls = 20
        self.thr = 0.5

        network = IENetwork(model=yolo_model_xml, weights=yolo_model_bin)
        self.yolo_input_blob = next(iter(network.inputs))
        self.yolo_out_blob = next(iter(network.outputs))

        cpu_plugin = IEPlugin(device='CPU')
        cpu_plugin.add_cpu_extension(cpu_extension_file)

        self.exec_yolo_net = cpu_plugin.load(network=network)
        del network
コード例 #4
0
def build_executors(xml_file, bin_file, devices):
    """Build one InferExecutorThread per requested device.

    :param xml_file: path to the IR topology (.xml)
    :param bin_file: path to the IR weights (.bin)
    :param devices: iterable of device names ("CPU" or "MYRIAD")
    :return: list of InferExecutorThread objects (not started)
    :raises ValueError: for any device name other than "CPU"/"MYRIAD"
    """
    threads_list = []
    image = cv2.imread("images/road.jpeg")
    dst_shape = (896, 512)
    input_images = cv2.dnn.blobFromImages([image], 1, dst_shape, swapRB=True)
    input_images_dict = {"data": input_images}

    for device in devices:
        if device == "CPU":
            plugin = IEPlugin(device)
            requests_num = benchmark_config["cpu_request_num"]
            config = {"CPU_THREADS_NUM": "0", "CPU_THROUGHPUT_STREAMS": str(requests_num)}
            plugin.add_cpu_extension(path_to_cpu_extention)
        elif device == "MYRIAD":
            # MYRIAD sticks are driven through the HDDL plugin here.
            plugin = IEPlugin("HDDL")
            config = {"LOG_LEVEL": "LOG_INFO",
                      "VPU_LOG_LEVEL": "LOG_INFO"}
            requests_num = benchmark_config["myriad_request_num"]
        else:
            # Fix: removed the unreachable sys.exit(-1) that followed this
            # raise - raising already leaves the function.
            raise ValueError('Unidentified device "{}"!'.format(device))

        plugin.set_config(config)
        ie_network = IENetwork(xml_file, bin_file)
        exe_network = plugin.load(ie_network, requests_num)
        infer_executor = InferExecutor(exe_network, input_images_dict)
        executor_thread = InferExecutorThread(device, infer_executor, running_time)
        threads_list.append(executor_thread)
    return threads_list
コード例 #5
0
def load_ir_model(model_xml, device, plugin_dir, cpu_extension):
    """Load an IR model and return it ready for inference.

    :param model_xml: path to the IR topology (.xml); weights are the
        matching .bin next to it
    :param device: OpenVINO device name
    :param plugin_dir: directory passed to IEPlugin as plugin_dirs
    :param cpu_extension: optional CPU extension library path
    :return: (exec_net, plugin, input_blob, out_blob, shape)
    """
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # initialize plugin
    log.info("Initializing plugin for %s device...", device)
    plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
    if cpu_extension and 'CPU' in device:
        plugin.add_cpu_extension(cpu_extension)

    # read IR
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in device:
        # Fail fast when the CPU plugin cannot run some layers.
        supported = plugin.get_supported_layers(net)
        missing = [layer for layer in net.layers.keys()
                   if layer not in supported]
        if missing:
            log.error(
                "Following layers are not supported by the plugin for specified device %s:\n %s",
                plugin.device, ', '.join(missing))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using "
                "--cpu_extension command line argument")
            sys.exit(1)

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    exec_net = plugin.load(network=net, num_requests=2)
    shape = net.inputs[input_blob].shape  # pylint: disable=E1136
    del net

    return exec_net, plugin, input_blob, out_blob, shape
コード例 #6
0
    def build(cls, model_name, model_version, model_xml, model_bin,
              mapping_config, batch_size_param, shape_param, num_ireq,
              target_device, plugin_config):
        """Build an engine for one model version.

        Creates the plugin, reads the IR, resolves batch and shape
        configuration, prepares the infer-request bookkeeping queues,
        loads the network and returns the assembled cls(...) instance.
        """
        plugin = IEPlugin(device=target_device,
                          plugin_dirs=GLOBAL_CONFIG['plugin_dir'])
        if GLOBAL_CONFIG['cpu_extension'] is not None \
                and 'CPU' in target_device:
            plugin.add_cpu_extension(GLOBAL_CONFIG['cpu_extension'])
        net = IENetwork(model=model_xml, weights=model_bin)
        batching_info = BatchingInfo(batch_size_param)
        shape_info = ShapeInfo(shape_param, net.inputs)
        # FIXED mode pushes the requested batch size onto the network;
        # otherwise the network's own batch size is recorded back.
        if batching_info.mode == BatchingMode.FIXED:
            net.batch_size = batching_info.batch_size
        else:
            batching_info.batch_size = net.batch_size

        effective_batch_size = batching_info.get_effective_batch_size()
        logger.debug(
            "[Model: {}, version: {}] --- effective batch size - {}".format(
                model_name, model_version, effective_batch_size))
        ###############################
        # Initial shape setup: FIXED reshapes to the given shape, AUTO lets
        # the IR derive it, DEFAULT leaves the network untouched.
        if shape_info.mode == ShapeMode.FIXED:
            logger.debug("[Model: {}, version: {}] --- Setting shape to "
                         "fixed value: {}".format(model_name, model_version,
                                                  shape_info.shape))
            net.reshape(shape_info.shape)
        elif shape_info.mode == ShapeMode.AUTO:
            logger.debug("[Model: {}, version: {}] --- Setting shape to "
                         "automatic".format(model_name, model_version))
            net.reshape({})
        elif shape_info.mode == ShapeMode.DEFAULT:
            logger.debug("[Model: {}, version: {}] --- Setting shape to "
                         "default".format(model_name, model_version))
        ###############################
        # Creating free infer requests indexes queue (all indexes start free)
        free_ireq_index_queue = queue.Queue(maxsize=num_ireq)
        for ireq_index in range(num_ireq):
            free_ireq_index_queue.put(ireq_index)
        ###############################
        requests_queue = queue.Queue(
            maxsize=GLOBAL_CONFIG['engine_requests_queue_size'])

        exec_net = plugin.load(network=net,
                               num_requests=num_ireq,
                               config=plugin_config)
        ir_engine = cls(model_name=model_name,
                        model_version=model_version,
                        mapping_config=mapping_config,
                        net=net,
                        plugin=plugin,
                        exec_net=exec_net,
                        batching_info=batching_info,
                        shape_info=shape_info,
                        free_ireq_index_queue=free_ireq_index_queue,
                        num_ireq=num_ireq,
                        requests_queue=requests_queue,
                        target_device=target_device,
                        plugin_config=plugin_config)
        return ir_engine
コード例 #7
0
def main(path_to_objxml, path_to_objbin, dev, ext, input, labels_map=None):
    """Run single-image object detection and display the annotated result.

    :param path_to_objxml: path to the detector IR topology (.xml)
    :param path_to_objbin: path to the detector IR weights (.bin)
    :param dev: OpenVINO device name; the CPU extension is loaded when 'CPU'
    :param ext: path to the CPU extension library
    :param input: path of the image file to run detection on
    :param labels_map: optional list mapping class_id - 1 -> label text
    """
    # Load network
    obj_net = IENetwork(model=path_to_objxml, weights=path_to_objbin)
    log.info("Loaded network")
    input_layer = next(iter(obj_net.inputs))
    output_layer = next(iter(obj_net.outputs))

    # Pre-process image: resize to the network input and go HWC -> NCHW
    n, c, h, w = obj_net.inputs[input_layer].shape
    obj_frame = cv2.imread(input)
    obj_in_frame = cv2.resize(obj_frame, (w, h))
    obj_in_frame = obj_in_frame.transpose((2, 0, 1))
    obj_in_frame = obj_in_frame.reshape((n, c, h, w))
    log.info("Pre-processed image")

    obj_plugin = IEPlugin(device=dev)
    if dev == 'CPU':
        obj_plugin.add_cpu_extension(ext)
    obj_exec_net = obj_plugin.load(network=obj_net, num_requests=1)
    log.info("Loaded network into plugin")

    # Do inference
    obj_res = obj_exec_net.infer({input_layer: obj_in_frame})
    log.info("Inference successful!")
    obj_det = obj_res[output_layer]

    initial_w = obj_frame.shape[1]
    initial_h = obj_frame.shape[0]
    # Each detection row is indexed as [.., class_id, conf, xmin, ymin,
    # xmax, ymax] with normalised coordinates (scaled back to the frame).
    for obj in obj_det[0][0]:
        # Draw only objects when probability more than specified threshold
        if obj[2] > 0.5:
            xmin = int(obj[3] * initial_w)
            ymin = int(obj[4] * initial_h)
            xmax = int(obj[5] * initial_w)
            ymax = int(obj[6] * initial_h)
            class_id = int(obj[1])

            # Draw box and label\class_id; colour derived from the class id
            color = (min(class_id * 12.5, 255), min(class_id * 7, 255), min(class_id * 5, 255))
            det_label = labels_map[class_id - 1] if labels_map else str(class_id)
            cv2.rectangle(obj_frame, (xmin, ymin), (xmax, ymax), color, 2)
            label_and_prob = det_label + ", " + str(obj[2] * 100) + "%"
            log.info('Detection: ' + label_and_prob)
            #cv2.putText(obj_frame, label_and_prob, (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)

    log.info("Hit q to close the window")

    # Resizing to maintainable window (fixed height, aspect-preserving width)
    inHeight = 368
    aspect_ratio = initial_w / initial_h
    inWidth = int(((aspect_ratio*inHeight)*8)//8)
    obj_frame = cv2.resize(obj_frame, (inWidth, inHeight))

    # Show the frame until the user presses 'q'
    while True:
        cv2.imshow('Detections', obj_frame)
        key = cv2.waitKey(1)
        if (key & 0xFF) == ord('q'):
            break

    cv2.destroyAllWindows()
コード例 #8
0
    def __init__(self, cpu_lib, detector_xml, detection_threshold):
        """Set up an OpenVINO CPU detector.

        :param cpu_lib: path to the CPU extension library
        :param detector_xml: path to the detector IR topology (.xml);
            the weights are loaded from the matching .bin next to it
        :param detection_threshold: confidence threshold stored for later use
        """
        # Plugin initialization for the CPU device with its extension library
        cpu_plugin = IEPlugin(device="CPU")
        cpu_plugin.add_cpu_extension(cpu_lib)

        # Read detector IR
        weights_path = os.path.splitext(detector_xml)[0] + ".bin"
        network = IENetwork.from_ir(model=detector_xml,
                                    weights=weights_path)

        self.d_in = next(iter(network.inputs))
        self.d_out = next(iter(network.outputs))
        network.batch_size = 1

        # Input geometry plus a pre-allocated batch buffer of that shape
        self.d_n, self.d_c, self.d_h, self.d_w = network.inputs[
            self.d_in].shape
        self.d_images = np.ndarray(
            shape=(self.d_n, self.d_c, self.d_h, self.d_w))

        # Loading the model to the plugin
        self.d_exec_net = cpu_plugin.load(network=network)

        self.detection_threshold = detection_threshold
コード例 #9
0
    def __initReidentification(self):
        """Initialise all variables required for face re-identification.

        Above all, this sets up the OpenVINO Inference Engine: the video
        capture, the detection and re-identification networks, their CPU
        plugins and the input/output blob metadata.
        """
        # Camera index 0 is passed in as the string '0'; anything else is
        # treated as a path/URL.
        if self.__path_to_camera == '0':
            self.__cap = cv2.VideoCapture(0)
        else:
            self.__cap = cv2.VideoCapture(self.__path_to_camera)
        net = IENetwork(model=self.__model_xml, weights=self.__model_bin)
        net_reid = IENetwork(model=self.__model_reid_xml,
                             weights=self.__model_reid_bin)

        # Separate CPU plugins for the detection and re-identification nets.
        plugin = IEPlugin(device="CPU")
        plugin.add_cpu_extension(self.__path_to_cpuextension)
        plugin_reid = IEPlugin(device="CPU")
        plugin_reid.add_cpu_extension(self.__path_to_cpuextension)
        # plugin_reid.set_config(net_reid)
        self.__exec_net = plugin.load(network=net, num_requests=1)
        self.__exec_net_reid = plugin_reid.load(network=net_reid,
                                                num_requests=1)

        self.__input_blob = next(iter(net.inputs))
        self.__out_blob = next(iter(net.outputs))
        # print('network.inputs = ' + str(list(net.inputs)))
        # print('network.outputs = ' + str(list(net.outputs)))
        # NCHW input geometry of the detection network
        self.__model_n, self.__model_c, self.__model_h, self.__model_w = net.inputs[
            self.__input_blob].shape

        self.__input_blob_reid = next(iter(net_reid.inputs))
        self.__out_blob_reid = next(iter(net_reid.outputs))

        # NCHW input geometry of the re-identification network
        self.__model_reid_n, self.__model_reid_c, self.__model_reid_h, self.__model_reid_w = net_reid.inputs[
            self.__input_blob_reid].shape
コード例 #10
0
    def __init__(self, model, weights):
        """Load an IR network on CPU, picking the AVX2 or SSE4 extension.

        :param model: path to the IR topology (.xml)
        :param weights: path to the IR weights (.bin)
        :raises OSError: when the IE_PLUGINS_PATH env variable is not set
        :raises Exception: when neither avx2 nor sse4 is supported, or the
            network contains layers the CPU plugin cannot run
        """
        self._model = model
        self._weights = weights

        plugins_path = os.getenv("IE_PLUGINS_PATH")
        if not plugins_path:
            raise OSError("Inference engine plugin path env not found in the system.")

        plugin = IEPlugin(device="CPU", plugin_dirs=[plugins_path])
        # Prefer the widest instruction set the host supports.
        if self._check_instruction("avx2"):
            plugin.add_cpu_extension(
                os.path.join(plugins_path, "libcpu_extension_avx2.so"))
        elif self._check_instruction("sse4"):
            plugin.add_cpu_extension(
                os.path.join(plugins_path, "libcpu_extension_sse4.so"))
        else:
            raise Exception("Inference engine requires a support of avx2 or sse4.")

        network = IENetwork.from_ir(model=self._model, weights=self._weights)
        supported = plugin.get_supported_layers(network)
        unsupported = [name for name in network.layers.keys()
                       if name not in supported]
        if unsupported:
            raise Exception("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ", ".join(unsupported)))

        self._input_blob_name = next(iter(network.inputs))
        self._output_blob_name = next(iter(network.outputs))

        self._net = plugin.load(network=network, num_requests=2)
        input_type = network.inputs[self._input_blob_name]
        # Older IE versions expose the dims list directly; newer ones need .shape.
        self._input_layout = input_type if isinstance(input_type, list) else input_type.shape
コード例 #11
0
def load_model(feature,model_xml,device,plugin_dirs,input_key_length,output_key_length,cpu_extension):
    """Create a plugin for *device* and read the IR for *feature*.

    :param feature: human-readable network name used in log messages
    :param model_xml: path to the IR topology (.xml); weights come from the
        matching .bin next to it
    :param device: OpenVINO device name
    :param plugin_dirs: plugin directories passed to IEPlugin
    :param input_key_length: expected number of network inputs
    :param output_key_length: expected number of network outputs
    :param cpu_extension: optional CPU extension library path
    :return: (plugin, net)
    """

    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    log.info("Initializing plugin for {} device...".format(device))
    plugin = IEPlugin(device, plugin_dirs)

    log.info("Loading network files for {}".format(feature))
    # NOTE(review): PERF_COUNT is only enabled when NO CPU extension is
    # loaded - confirm this either/or behaviour is intended.
    if cpu_extension and 'CPU' in device:
        plugin.add_cpu_extension(cpu_extension)
    else:
        plugin.set_config({"PERF_COUNT":"YES"})

    net = IENetwork(model=model_xml, weights=model_bin)

    # Fail fast when the CPU plugin cannot run some layers.
    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)

    log.info("Checking {} network inputs".format(feature))
    assert len(net.inputs.keys()) == input_key_length, "Demo supports only single input topologies"
    log.info("Checking {} network outputs".format(feature))
    assert len(net.outputs) == output_key_length, "Demo supports only single output topologies"
    return plugin,net
コード例 #12
0
def IEsetup(model_xml, model_bin, device, verbose=False):
    """Load an IR network onto *device* with optional progress printing.

    :param model_xml: path to the IR topology (.xml)
    :param model_bin: path to the IR weights (.bin)
    :param device: OpenVINO device name; CPU loads a hard-coded extension
    :param verbose: print setup details when True
    :return: (exec_net, plugin, input_blob, out_blobs)
    """
    start = time()
    plugin = IEPlugin(device=device, plugin_dirs=None)
    libcpu = "inference_engine_samples/intel64/Release/lib/libcpu_extension.so"
    libcpu = os.environ['HOME'] + "/" + libcpu
    if device == "CPU":
        plugin.add_cpu_extension(libcpu)
    net = IENetwork(model=model_xml, weights=model_bin)

    if verbose:
        print("* IEsetup", model_bin, "on", device)
    exec_net = plugin.load(network=net, num_requests=1)

    input_blob = next(iter(net.inputs))
    model_n, model_c, model_h, model_w = net.inputs[input_blob].shape
    if verbose:
        print("network in shape n/c/h/w (from xml)= %d %d %d %d" %
              (model_n, model_c, model_h, model_w))
        print("input_blob =", input_blob)

    # Collect every output blob name, printing its shape when verbose.
    out_blobs = []
    for out_blob in net.outputs:
        if verbose:
            print("  net.outputs[", out_blob, "].shape",
                  net.outputs[out_blob].shape)
        out_blobs.append(out_blob)

    if verbose:
        print("* IEsetup done %.2fmsec" % (1000. * (time() - start)))
    del net
    return exec_net, plugin, input_blob, out_blobs
コード例 #13
0
def load_ie_model(model_xml, device, plugin_dir, cpu_extension=''):
    """Loads a model in the Inference Engine format.

    :param model_xml: path to the IR topology (.xml); weights come from the
        matching .bin next to it
    :param device: OpenVINO device name
    :param plugin_dir: plugin directory passed to IEPlugin
    :param cpu_extension: optional CPU extension library path
    :return: an IEModel wrapping the loaded executable network
    """
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
    if cpu_extension and 'CPU' in device:
        plugin.add_cpu_extension(cpu_extension)

    # Read IR
    log.info("Loading network files:\n\t%s\n\t%s", model_xml, model_bin)
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in plugin.device:
        # Fail fast when the CPU plugin cannot run some layers.
        supported = plugin.get_supported_layers(net)
        unsupported = [name for name in net.layers.keys()
                       if name not in supported]
        if unsupported:
            log.error("Following layers are not supported by the plugin for specified device %s:\n %s",
                      plugin.device, ', '.join(unsupported))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys()) == 1, "Checker supports only single input topologies"
    assert len(net.outputs) == 1, "Checker supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = 1

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)
    wrapped = IEModel(exec_net, net.inputs, input_blob, out_blob)
    del net
    return wrapped
def init_model(xml, bins):
    """Load an IR model on the CPU plugin with the bundled SSE4 extension.

    :param xml: path to the IR topology (.xml)
    :param bins: path to the IR weights (.bin)
    :return: (exec_nets, n, c, w, h, input_blob, out_blob, plugin)
    """
    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device='CPU')
    plugin.add_cpu_extension(
        'utils/libcpu_extension_sse4.so')
    log.info("Reading IR...")
    net = IENetwork(model=xml, weights=bins)

    if plugin.device == "CPU":
        # Fail fast when the CPU plugin cannot run some layers.
        supported = plugin.get_supported_layers(net)
        unsupported = [name for name in net.layers.keys()
                       if name not in supported]
        if unsupported:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(unsupported)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_nets = plugin.load(network=net, num_requests=2)
    n, c, h, w = net.inputs[input_blob].shape
    del net
    return exec_nets, n, c, w, h, input_blob, out_blob, plugin
    def __init__(self, device, getFrameFunc):
        """Load mobilenet-ssd for *device* and start the inference thread.

        :param device: OpenVINO device name; 'CPU' selects the FP32 IR,
            anything else the FP16 IR
        :param getFrameFunc: callable supplying frames to the inference thread
        """
        self.getFrameFunc = getFrameFunc
        self.originFrame = None
        self.newDataAvailable = False

        if device == 'CPU':
            model_xml = './models/FP32/mobilenet-ssd.xml'
        else:
            model_xml = './models/FP16/mobilenet-ssd.xml'

        model_bin = os.path.splitext(model_xml)[0] + ".bin"

        cpu_extension = '/opt/intel/openvino/inference_engine/lib/intel64/libcpu_extension_sse4.so'
        # VOC-style class labels; index 0 is the background class ("None").
        self.labels = ["None", "plane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "table", "dog", "horse", "motorcycle", "person", "plant", "sheep", "sofa", "train", "monitor"]

        net = IENetwork(model=model_xml, weights=model_bin)
        plugin = IEPlugin(device=device)
        if cpu_extension and 'CPU' in device:
            plugin.add_cpu_extension(cpu_extension)
        self.exec_net = plugin.load(net)

        self.input_blob = next(iter(net.inputs))
        self.out_blob = next(iter(net.outputs))

        # Only the spatial input size is kept; batch/channels are unused here.
        n, c, self.h, self.w = net.inputs[self.input_blob].shape

        self.detectedObjects = []
        self.infer_time = 0

        self.inferFPS = 15

        # Daemon thread: exits together with the main process.
        processThread =  threading.Thread(target=self.inferenceThread)
        processThread.daemon = True
        processThread.start()
コード例 #16
0
ファイル: openvino_helper.py プロジェクト: hritools/faceR
def load_network(model_full_path, device, extensions, batch=1, new_shape=None):
    """Read an IR pair (<path>.xml / <path>.bin) and load it on *device*.

    :param model_full_path: IR path without extension
    :param device: device name (upper-cased before use)
    :param extensions: mapping device -> settings; an 'extension library'
        entry is loaded as a CPU extension when present
    :param batch: batch size applied for non-MYRIAD devices
    :param new_shape: currently unused
    :return: (plugin, exec_net, input_blob, out_blob)
    """
    from openvino.inference_engine import IENetwork, IEPlugin

    #  Read in Graph file (IR)
    net = IENetwork.from_ir(model=model_full_path + ".xml",
                            weights=model_full_path + ".bin")

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    #  Plugin initialization for specified device and load extensions library if needed
    plugin = IEPlugin(device=device.upper())
    if 'extension library' in extensions[device]:
        plugin.add_cpu_extension(extensions[device]['extension library'])

    if device.upper() == 'MYRIAD':
        exec_net = plugin.load(network=net)
    else:
        # Batch size is only adjusted for non-MYRIAD targets.
        net.batch_size = batch
        exec_net = plugin.load(network=net)

    del net
    return plugin, exec_net, input_blob, out_blob
コード例 #17
0
    def __init__(self):
        """Load the IR named by the XML_PATH/BIN_PATH environment variables
        onto the CPU plugin, one infer request per batch element."""
        try:
            xml_path = os.environ["XML_PATH"]
            bin_path = os.environ["BIN_PATH"]
        except KeyError:
            print("Please set the environment variables XML_PATH, BIN_PATH")
            sys.exit(1)

        local_xml = GetLocalPath(xml_path)
        local_bin = GetLocalPath(bin_path)
        print('path object', local_xml)

        extension_lib = os.getenv('CPU_EXTENSION',
                                  "/usr/local/lib/libcpu_extension.so")

        plugin = IEPlugin(device='CPU', plugin_dirs=None)
        if extension_lib:
            plugin.add_cpu_extension(extension_lib)

        net = IENetwork(model=local_xml, weights=local_bin)
        self.input_blob = next(iter(net.inputs))
        self.out_blob = next(iter(net.outputs))
        # One infer request per element of the model's batch dimension.
        self.batch_size = net.inputs[self.input_blob].shape[0]
        self.inputs = net.inputs
        self.outputs = net.outputs
        self.exec_net = plugin.load(network=net, num_requests=self.batch_size)
コード例 #18
0
def main():
    """Run synchronous classification on one image and print the top-N scores."""
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    # NOTE(review): unpacking net.inputs[input_blob] directly (no .shape)
    # relies on the old from_ir() API exposing the dims list - confirm
    # against the installed OpenVINO version.
    n, c, h, w = net.inputs[input_blob]
    image = cv2.imread(args.input)
    image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    image = image.reshape((n, c, h, w))
    # Load network to the plugin
    exec_net = plugin.load(network=net)
    del net
    # Start sync inference
    res = exec_net.infer(inputs={input_blob: image})
    # Indices of the top-N scores, highest first
    top_ind = np.argsort(res[out_blob], axis=1)[0, -args.number_top:][::-1]
    for i in top_ind:
        print("%f #%d" % (res[out_blob][0, i], i))
    del exec_net
    del plugin
コード例 #19
0
class InferenceEngine(object):
    """Wrapper around an OpenVINO executable network with two infer
    requests, usable synchronously or in ping-pong asynchronous mode."""

    def __init__(self, model_bin, model_xml, device):
        """Load the IR onto *device* and prepare two infer requests.

        :param model_bin: path to the IR weights (.bin)
        :param model_xml: path to the IR topology (.xml)
        :param device: OpenVINO device name
        """
        log.basicConfig(format="[ %(levelname)s ] %(message)s",
                        level=log.INFO, stream=sys.stdout)
    # Plugin initialization for specified device and load extensions library if specified
        log.info("Initializing plugin for {} device...".format(device))
        self.plugin = IEPlugin(device=device)

    # Read IR
        log.info("Reading IR...")

        net = IENetwork(model=model_xml, weights=model_bin)

        # Hard-coded CPU extension path, loaded only on CPU devices.
        cpu_extension = "/usr/local/lib/libcpu_extension.so"
        if cpu_extension and 'CPU' in device:
            self.plugin.add_cpu_extension(cpu_extension)

        assert len(net.inputs.keys(
        )) == 1, "This application supports only single input topologies"
        assert len(
            net.outputs) == 1, "This application supports only single output topologies"
        self.input_blob = next(iter(net.inputs))
        self.out_blob = next(iter(net.outputs))
        log.info("Loading IR to the plugin...")
        self.exec_net = self.plugin.load(network=net, num_requests=2)

        # NCHW input geometry of the network
        self.n, self.c, self.h, self.w = net.inputs[self.input_blob].shape
        del net

        self.asynchronous = False

        self.cur_request_id = 0
        self.next_request_id = 1

    def submit_request(self, frame, wait=False):
        """Resize *frame* to the network input and start an async request.

        NOTE(review): in asynchronous mode the ids are swapped before the
        request is issued on next_request_id, while wait()/fetch_result()
        use cur_request_id - confirm the intended ping-pong ordering.
        """
        in_frame = cv2.resize(frame, (self.w, self.h))
        # Change data layout from HWC to CHW
        in_frame = in_frame.transpose((2, 0, 1))
        in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))

        if self.asynchronous:
            self.cur_request_id, self.next_request_id = self.next_request_id, self.cur_request_id
            self.exec_net.start_async(request_id=self.next_request_id, inputs={
                                      self.input_blob: in_frame})
        else:
            self.exec_net.start_async(request_id=self.cur_request_id, inputs={
                                      self.input_blob: in_frame})
        if wait:
            return self.wait()

        return True

    def wait(self):
        """Block until the current request finishes; True on success (status 0)."""
        return (self.exec_net.requests[self.cur_request_id].wait(-1) == 0)

    def fetch_result(self):
        """Return the output blob of the current request."""
        return self.exec_net.requests[self.cur_request_id].outputs[self.out_blob]
コード例 #20
0
 def _init_plugin(self, device, cpu_extension, plugin_dir):
     """Create an IEPlugin for *device*, loading the CPU extension if given.

     :param device: target device name (e.g. 'CPU')
     :param cpu_extension: optional path to the CPU extension library
     :param plugin_dir: directory passed to IEPlugin as plugin_dirs
     :return: the configured IEPlugin
     """
     logger.info("Initializing plugin for {} device...".format(device))
     plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
     logger.info("Plugin for {} device version:{}".format(
         device, plugin.version))
     if cpu_extension and 'CPU' in device:
         plugin.add_cpu_extension(cpu_extension)
     return plugin
コード例 #21
0
    def run_ie_on_dataset(model_xml,
                          model_bin,
                          cpu_extension_path,
                          images_dir,
                          prob_threshold=0.01):
        """Run the detector over every file in *images_dir*.

        :param model_xml: path to the IR topology (.xml)
        :param model_bin: path to the IR weights (.bin)
        :param cpu_extension_path: CPU extension library to load
        :param images_dir: directory whose files are treated as images
        :param prob_threshold: minimum confidence for a detection to be kept
        :return: list of {'image': path, 'objects': [...]} dicts
        """
        plugin = IEPlugin(device='CPU')
        plugin.add_cpu_extension(cpu_extension_path)
        net = IENetwork.from_ir(model=model_xml, weights=model_bin)
        assert len(net.inputs.keys()
                   ) == 1, "Sample supports only single input topologies"
        assert len(
            net.outputs) == 1, "Sample supports only single output topologies"
        input_blob = next(iter(net.inputs))
        out_blob = next(iter(net.outputs))
        exec_net = plugin.load(network=net, num_requests=2)
        # NOTE(review): direct unpacking (no .shape) relies on the old
        # from_ir() API exposing the dims list - confirm against the
        # installed OpenVINO version.
        num, chs, height, width = net.inputs[input_blob]
        del net
        cur_request_id = 0

        detection_data = []
        for image in os.listdir(images_dir):
            im_path = os.path.join(images_dir, image)
            frame = cv2.imread(im_path)
            initial_h, initial_w, _ = frame.shape
            in_frame = cv2.resize(frame, (width, height))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((num, chs, height, width))

            objects_per_image = []
            exec_net.start_async(request_id=cur_request_id,
                                 inputs={input_blob: in_frame})

            # wait(-1) blocks until the request completes; 0 means success.
            if exec_net.requests[cur_request_id].wait(-1) == 0:
                res = exec_net.requests[cur_request_id].outputs[out_blob]
                for obj in res[0][0]:
                    if obj[2] > prob_threshold:
                        # Normalised box coordinates scaled back to the frame.
                        xmin = int(obj[3] * initial_w)
                        ymin = int(obj[4] * initial_h)
                        xmax = int(obj[5] * initial_w)
                        ymax = int(obj[6] * initial_h)
                        class_id = int(obj[1])
                        conf = obj[2]
                        objects_per_image.append({
                            'bbox': [xmin, ymin, xmax, ymax],
                            'class_id':
                            class_id,
                            'score':
                            conf
                        })

            det_item = {'image': im_path, 'objects': objects_per_image}
            detection_data.append(det_item)

        del exec_net
        del plugin

        return detection_data
コード例 #22
0
ファイル: infer.py プロジェクト: mtferrum/V3
class Infer:
    """Face pipeline on OpenVINO: detect faces, align by landmarks, extract features."""

    # Default CPU extension shipped with OpenVINO <= 2019 installs.
    _DEFAULT_CPU_EXTENSION = "/opt/intel/openvino_2019.3.376/inference_engine/lib/intel64/libcpu_extension_avx512.so"

    def __init__(self, mode='CPU', cpu_extension=None):
        """Create the IE plugin for ``mode`` and load the three face models on it."""
        self.vino_plugin = IEPlugin(mode)
        if mode == 'CPU':
            try:
                # OpenVINO 2019 and earlier require an explicit CPU extension
                # library; 2020+ raises RuntimeError here, which is ignored.
                if cpu_extension is None:
                    cpu_extension = self._DEFAULT_CPU_EXTENSION
                self.vino_plugin.add_cpu_extension(cpu_extension)
            except RuntimeError:
                pass
        self.detector = FaceDetectorModel(vino_plugin=self.vino_plugin)
        self.landmark = LandmarksDetector(vino_plugin=self.vino_plugin)
        self.reid = FaceReidModel(vino_plugin=self.vino_plugin)

    def infer_frame(self, frame):
        """Detect faces on ``frame`` (BGR) and return a list of face dicts.

        Each dict holds the clamped box (left/top/right/bottom), the frame
        size under 'initial', and a 256-d 'features' re-identification vector.
        """
        frame_h, frame_w, _ = frame.shape
        detector_res = self.detector.prepare_and_infer(frame)
        log.debug("infer Detection result: {}".format(detector_res.shape))

        faces = []
        feature_rows = []
        for det in detector_res[0][0]:
            confidence = det[2]
            if confidence <= 0.50:
                continue
            # Map normalized detector coordinates to pixels, clamped to the frame.
            left = max(int(det[3] * frame_w), 0)
            top = max(int(det[4] * frame_h), 0)
            right = min(int(det[5] * frame_w), frame_w)
            bottom = min(int(det[6] * frame_h), frame_h)

            crop = frame[top:bottom, left:right]
            landmark_res = self.landmark.prepare_and_infer(crop[:, :, ::-1])  # BGR -> RGB
            aligned = align_face(crop, landmark_res[0])
            feature_rows.append(self.reid.prepare_and_infer(aligned))
            faces.append({'initial': {'height': frame_h, 'width': frame_w},
                          'left': left, 'top': top, 'right': right,
                          'bottom': bottom})

        if faces:
            stacked = np.vstack(feature_rows)
            stacked = stacked.reshape(stacked.shape[0], 256)
            for face, vec in zip(faces, stacked):
                face['features'] = vec
        return faces
コード例 #23
0
def main():
    """HETERO-plugin sample: set per-layer-type affinity, classify one image,
    and dump the default/resulting affinity graphs as .dot files."""
    args = build_argparser().parse_args()
    assert args.device.split(':')[0] == "HETERO", "This sample supports only Hetero Plugin. " \
                                                  "Please specify correct device, e.g. HETERO:FPGA,CPU"
    xml_path = args.model
    bin_path = os.path.splitext(xml_path)[0] + ".bin"

    # Plugin initialization; the CPU extension only matters for CPU fallback.
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    # Read the IR.
    net = IENetwork.from_ir(model=xml_path, weights=bin_path)

    net_ops = {layer['type'] for layer in net.get_layers().values()}
    if not any(op in net_ops for op in ("Convolution", "Concat")):
        print("Specified IR doesn't contain any Convolution or Concat operations for which affinity going to be set.\n"
              "Try to use another topology to make the affinity setting result more visible.")

    # Default affinity for the fallback devices, then enable .dot graph dumps.
    plugin.set_config({"TARGET_FALLBACK": args.device.split(':')[1]})
    plugin.set_config({"HETERO_DUMP_GRAPH_DOT": "YES"})
    plugin.set_initial_affinity(net)

    # Affinity by layer type; a layers_affinity_map (per layer name) would
    # take precedence over this if supplied.
    net.set_affinity(types_affinity_map={"Convolution": "GPU", "Concat": "CPU"})

    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    # Pre-process the input image: resize, HWC -> CHW, add the batch dim.
    n, c, h, w = net.inputs[input_blob]
    blob = cv2.resize(cv2.imread(args.input), (w, h)).transpose((2, 0, 1)).reshape((n, c, h, w))

    # Load the network and run one synchronous inference.
    exec_net = plugin.load(network=net)
    del net
    res = exec_net.infer(inputs={input_blob: blob})

    # Print the top-N class probabilities, highest first.
    for idx in np.argsort(res[out_blob], axis=1)[0, -args.number_top:][::-1]:
        print("%f #%d" % (res[out_blob][0, idx], idx))
    del exec_net
    del plugin

    cwd = os.getcwd()
    print(
        "Graphs representing default and resulting affinities dumped to {} and {} files respectively"
            .format(os.path.join(cwd, 'hetero_affinity.dot'), os.path.join(cwd, 'hetero_subgraphs.dot'))
    )
コード例 #24
0
ファイル: openvinotest.py プロジェクト: VladoDemcak/ovdebug
def main():
    """Stream frames from a camera (``--input`` omitted) or video file and run
    synchronous inference on each frame, displaying the raw frames.

    Raises SystemExit(1) when the CPU plugin lacks support for some layers.
    """
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        # Fail fast when the IR contains layers the CPU plugin cannot execute.
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    cap = cv2.VideoCapture(0) if args.input is None else cv2.VideoCapture(args.input)

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)

    n, c, h, w = net.inputs[input_blob].shape

    i = 0
    while cap.isOpened():
        ret, img = cap.read()
        if img is None:
            break
        # BUGFIX: the input must ALWAYS be converted to the network layout.
        # Previously a frame that already matched (h, w) was fed in HWC order
        # (no transpose, no reshape), giving garbage results on such inputs.
        sized = cv2.resize(img, (w, h)) if img.shape[:-1] != (h, w) else img
        frame = sized.transpose((2, 0, 1)).reshape((n, c, h, w))  # HWC -> NCHW
        log.info("#{} exec_net.infer starting...".format(i))
        res = exec_net.infer(inputs={input_blob: frame})[out_blob]
        log.info("#{} exec_net.infer finished, res: {}".format(i, res[0][0][0]))
        i += 1

        cv2.imshow("debug", img)
        cv2.waitKey(1)

    cv2.destroyAllWindows()
    cap.release()
コード例 #25
0
ファイル: detector.py プロジェクト: Doken-Tokuyama/pyvino
    def _set_ieplugin(self):
        """Create an IEPlugin for ``self.device`` and load ``self.net`` onto it.

        Only the CPU device is supported; the configured CPU extension is
        added before loading. Stores the resulting executable network (with
        two infer requests) in ``self.exec_net``.
        """
        plugin = IEPlugin(device=self.device, plugin_dirs=None)
        if self.device != "CPU":
            raise NotImplementedError("Now, Only CPU is supported")
        plugin.add_cpu_extension(self.cpu_extension)
        self.exec_net = plugin.load(network=self.net, num_requests=2)
コード例 #26
0
def test_add_cpu_extenstion_wrong_device():
    """add_cpu_extension must be rejected on a non-CPU (GPU) plugin,
    and constructing IEPlugin must emit the deprecation warning."""
    with pytest.raises(RuntimeError) as e:
        with warnings.catch_warnings(record=True) as w:
            plugin = IEPlugin("GPU", None)
            plugin.add_cpu_extension("./")
    if "Cannot find plugin to use" in str(e.value):
        pytest.skip("No GPU found. Skipping test")
    # BUGFIX: these assertions previously sat inside the ``pytest.raises``
    # block, after the raising call, so they were dead code that never ran.
    assert len(w) == 1
    assert "IEPlugin class is deprecated. " \
           "Please use IECore class instead." in str(w[0].message)
    assert "add_cpu_extension method applicable only for CPU or HETERO devices" in str(e.value)
コード例 #27
0
ファイル: main.py プロジェクト: bosques-urbanos/iikim
def create_ie_plugin(device='CPU', cpu_extension=None, plugin_dir=None):
    """Return an IEPlugin for ``device`` with device-specific tweaks applied.

    MYRIAD gets hardware-stage optimization enabled; CPU optionally loads
    the given extension library.
    """
    print("Initializing plugin for {} device...".format(device))
    plugin = IEPlugin(device, plugin_dirs=plugin_dir)

    if 'MYRIAD' in device:
        plugin.set_config({"VPU_HW_STAGES_OPTIMIZATION": "YES"})
    if cpu_extension and 'CPU' in device:
        plugin.add_cpu_extension(cpu_extension)
    return plugin
コード例 #28
0
class FacialLamdmark():
    """Wrapper around landmarks-regression-retail-0009 for face alignment.

    Device and IR precision are chosen per platform: CPU/FP32 on Windows and
    macOS, MYRIAD/FP16 on everything else.
    """

    def __init__(self):
        if platform.system() == 'Windows':
            self.plugin = IEPlugin(device='CPU', plugin_dirs=None)
            model_xml = "./extension/IR/FP32/landmarks-regression-retail-0009.xml"
            model_bin = "./extension/IR/FP32/landmarks-regression-retail-0009.bin"
            extension_path = 'extension/cpu_extension.dll'
        elif platform.system() == 'Darwin':
            self.plugin = IEPlugin(device='CPU', plugin_dirs=None)
            model_xml = "./extension/IR/FP32/landmarks-regression-retail-0009.xml"
            model_bin = "./extension/IR/FP32/landmarks-regression-retail-0009.bin"
            extension_path = 'extension/libcpu_extension.dylib'
        else:
            self.plugin = IEPlugin(device='MYRIAD', plugin_dirs=None)
            model_xml = "./extension/IR/FP16/landmarks-regression-retail-0009.xml"
            model_bin = "./extension/IR/FP16/landmarks-regression-retail-0009.bin"
            extension_path = 'extension/libcpu_extension.dylib'

        # Resolve the extension library to an absolute path.
        extension_path = pathlib.Path(extension_path)
        self.cpu_extension = str(extension_path.resolve())

        net = IENetwork(model=model_xml, weights=model_bin)
        self.input_blob = next(iter(net.inputs))
        self.out_blob = next(iter(net.outputs))
        # Network input geometry: batch, channels, height, width.
        self.n, self.c, self.h, self.w = net.inputs[self.input_blob].shape
        # BUGFIX: add_cpu_extension is applicable only to CPU (or HETERO)
        # plugins; calling it unconditionally raised RuntimeError on the
        # MYRIAD branch above.
        if self.plugin.device == 'CPU':
            self.plugin.add_cpu_extension(self.cpu_extension)
        self.exec_net = self.plugin.load(network=net, num_requests=2)
        self.face_id = 0

    def turn_face(self, face_img):
        """Return ``face_img`` aligned by its 5 regressed facial landmarks.

        Returns None implicitly if the inference request does not complete
        with status 0.
        """
        # NOTE(review): the buffer is sized by face_img.shape[2] (the channel
        # count), which looks accidental; preserved because face_id is always
        # 0 here, so only the first row is ever used.
        facial_landmarks = np.zeros((face_img.shape[2], 5, 2))
        in_frame = cv2.resize(face_img, (self.w, self.h))
        in_frame = in_frame.transpose((2, 0, 1))  # HWC -> CHW
        in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))
        self.exec_net.start_async(request_id=0, inputs={self.input_blob: in_frame})

        if self.exec_net.requests[0].wait(-1) == 0:
            # 10 values: (x0, y0, ..., x4, y4), normalized to the crop size.
            res = self.exec_net.requests[0].outputs[self.out_blob].reshape(1, 10)[0]

            lm_face = face_img.copy()
            for i in range(res.size // 2):
                normed_x = res[2 * i]
                normed_y = res[2 * i + 1]
                # Scale normalized coordinates back to crop pixels.
                x_lm = lm_face.shape[1] * normed_x
                y_lm = lm_face.shape[0] * normed_y
                cv2.circle(lm_face, (int(x_lm), int(y_lm)), 1 + int(0.03 * lm_face.shape[1]), (255, 255, 0), -1)
                facial_landmarks[self.face_id][i] = (x_lm, y_lm)

            aligned_face = face_img.copy()
            aligned_face = align_face(aligned_face, facial_landmarks[self.face_id])

            return aligned_face
コード例 #29
0
def greengrass_classification_sample_run():
    """Classify frames from PARAM_INPUT_SOURCE and publish top-N results over MQTT.

    Results are reported at most once per ``reporting_interval`` seconds via
    the external ``report`` helper.
    """
    client.publish(topic=topic_name, payload="OpenVINO: Initializing...")
    model_bin = os.path.splitext(PARAM_MODEL_XML)[0] + ".bin"

    # Plugin initialization; load the CPU extension only for CPU devices.
    plugin = IEPlugin(device=PARAM_DEVICE, plugin_dirs="")
    if "CPU" in PARAM_DEVICE:
        plugin.add_cpu_extension(PARAM_CPU_EXTENSION_PATH)

    # Read the IR; this sample handles exactly one input and one output blob.
    net = IENetwork.from_ir(model=PARAM_MODEL_XML, weights=model_bin)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    # Network input geometry and the video source.
    n, c, h, w = net.inputs[input_blob]
    cap = cv2.VideoCapture(PARAM_INPUT_SOURCE)
    exec_net = plugin.load(network=net)
    del net

    client.publish(topic=topic_name,
                   payload="Starting inference on %s" % PARAM_INPUT_SOURCE)
    start_time = timeit.default_timer()
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Resize, then HWC -> NCHW. NOTE: ``frame`` is rebound to the
        # resized image, and that resized frame is what gets reported below.
        frame = cv2.resize(frame, (w, h))
        in_frame = frame.transpose((2, 0, 1)).reshape((n, c, h, w))

        # Synchronous inference; gather the top-N class indices.
        res = exec_net.infer(inputs={input_blob: in_frame})
        top_ind = np.argsort(res[out_blob],
                             axis=1)[0, -PARAM_NUM_TOP_RESULTS:][::-1]
        res_json = [{"confidence": round(res[out_blob][0, i], 2), "label": i}
                    for i in top_ind]

        # Report at most once per reporting interval.
        if timeit.default_timer() - start_time >= reporting_interval:
            report(res_json, frame)
            start_time = timeit.default_timer()

    client.publish(topic=topic_name, payload="End of the input, exiting...")
    del exec_net
    del plugin
コード例 #30
0
    def __init__(self, cfg_file, yolo_model_xml, yolo_model_bin,
                 cpu_extension_file, equal_scale):
        """Parse the darknet cfg, precompute anchors and per-stride grids, and
        load the YOLO IR onto the CPU plugin."""
        self.thr = 0.001  # detection score threshold
        self.equal_scale = equal_scale

        blocks = parse_cfg(cfg_file)
        self.input_width = int(blocks[0]['width'])
        self.input_height = int(blocks[0]['height'])
        self.blocks = blocks[1:]
        self.stride = [32, 16, 8]

        # Anchors and class count come from the first [yolo] section.
        self.anchors = None
        for section in self.blocks:
            if section['type'] == 'yolo':
                self.anchors = list(map(int, section['anchors'].split(',')))
                self.num_cls = int(section['classes'])
                break
        print(self.anchors)
        print(self.num_cls)

        # Reshape into scale groups of three (w, h) pairs, then reverse the
        # group order relative to the cfg listing.
        base_anchors = np.array(self.anchors, dtype=np.float32).reshape(-1, 3, 2)
        self.base_anchors = base_anchors[::-1].copy()

        # Coordinate tables (via get_coord) for each output stride.
        longest_side = max(self.input_width, self.input_height)
        self.coords = [get_coord(longest_side, s) for s in self.stride]

        yolo_net = IENetwork(model=yolo_model_xml, weights=yolo_model_bin)
        self.yolo_input_blob = next(iter(yolo_net.inputs))
        print(type(yolo_net.outputs))
        self.yolo_out_blob = list(yolo_net.outputs.keys())

        plugin = IEPlugin(device='CPU')
        plugin.add_cpu_extension(cpu_extension_file)

        self.exec_yolo_net = plugin.load(network=yolo_net)
        del yolo_net
コード例 #31
0
  def run_ie_on_dataset(model_xml, model_bin, cpu_extension_path, images_dir, prob_threshold=0.01):
    """Run SSD-style detection over every image file in ``images_dir``.

    Returns a list of ``{'image': path, 'objects': [...]}`` dicts, where each
    object has 'bbox' [xmin, ymin, xmax, ymax], 'class_id' and 'score'.
    """
    plugin = IEPlugin(device='CPU')
    plugin.add_cpu_extension(cpu_extension_path)
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    exec_net = plugin.load(network=net, num_requests=2)
    num, chs, height, width = net.inputs[input_blob]
    del net

    request_id = 0
    detection_data = []
    for image_name in os.listdir(images_dir):
      im_path = os.path.join(images_dir, image_name)
      frame = cv2.imread(im_path)
      initial_h, initial_w = frame.shape[:2]
      # Resize to the network input, then HWC -> NCHW.
      blob = cv2.resize(frame, (width, height)).transpose((2, 0, 1)).reshape((num, chs, height, width))

      objects_per_image = []
      exec_net.start_async(request_id=request_id, inputs={input_blob: blob})
      if exec_net.requests[request_id].wait(-1) == 0:
        res = exec_net.requests[request_id].outputs[out_blob]
        for obj in res[0][0]:
          if obj[2] > prob_threshold:
            # obj[1] is the class id, obj[2] the confidence, obj[3:7] the
            # normalized box corners.
            objects_per_image.append({
                'bbox': [int(obj[3] * initial_w), int(obj[4] * initial_h),
                         int(obj[5] * initial_w), int(obj[6] * initial_h)],
                'class_id': int(obj[1]),
                'score': obj[2],
            })

      detection_data.append({'image': im_path, 'objects': objects_per_image})

    del exec_net
    del plugin

    return detection_data
コード例 #32
0
def greengrass_object_detection_sample_ssd_run():
    """Run SSD object detection on PARAM_INPUT_SOURCE and publish results over MQTT.

    Frames are annotated with detection boxes; per-object results (label,
    class, confidence, normalized box) are reported at most once per
    ``reporting_interval`` seconds together with the inference FPS.
    """
    client.publish(topic=PARAM_TOPIC_NAME, payload="OpenVINO: Initializing...")
    model_bin = os.path.splitext(PARAM_MODEL_XML)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=PARAM_DEVICE, plugin_dirs="")
    if "CPU" in PARAM_DEVICE:
        plugin.add_cpu_extension(PARAM_CPU_EXTENSION_PATH)

    # Read IR
    net = IENetwork(model=PARAM_MODEL_XML, weights=model_bin)
    client.publish(topic=PARAM_TOPIC_NAME, payload="OpenVINO: Read IR...")
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    cap = cv2.VideoCapture(PARAM_INPUT_SOURCE)
    exec_net = plugin.load(network=net)
    del net
    client.publish(topic=PARAM_TOPIC_NAME, payload="Starting inference on %s" % PARAM_INPUT_SOURCE)
    start_time = timeit.default_timer()
    inf_seconds = 0.0
    frame_count = 0
    labeldata = None
    if PARAM_LABELMAP_FILE is not None:
        with open(PARAM_LABELMAP_FILE) as labelmap_file:
            labeldata = json.load(labelmap_file)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frameid = cap.get(cv2.CAP_PROP_POS_FRAMES)
        # Named property constants instead of the magic numbers 3 / 4.
        initial_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        initial_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        in_frame = cv2.resize(frame, (w, h))
        in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))
        # Start synchronous inference; accumulate pure-inference time for FPS.
        inf_start_time = timeit.default_timer()
        res = exec_net.infer(inputs={input_blob: in_frame})
        inf_seconds += timeit.default_timer() - inf_start_time
        # Parse detection results of the current request
        res_json = OrderedDict()
        frame_timestamp = datetime.datetime.now()
        object_id = 0
        for obj in res[out_blob][0][0]:
            if obj[2] > 0.5:  # confidence threshold
                xmin = int(obj[3] * initial_w)
                ymin = int(obj[4] * initial_h)
                xmax = int(obj[5] * initial_w)
                ymax = int(obj[6] * initial_h)
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 4)
                obj_id = "Object" + str(object_id)
                classlabel = labeldata[str(int(obj[1]))] if labeldata else ""
                res_json[obj_id] = {"label": int(obj[1]), "class": classlabel, "confidence": round(obj[2], 2), "xmin": round(
                    obj[3], 2), "ymin": round(obj[4], 2), "xmax": round(obj[5], 2), "ymax": round(obj[6], 2)}
                object_id += 1
        frame_count += 1
        # Measure elapsed seconds since the last report
        seconds_elapsed = timeit.default_timer() - start_time
        if seconds_elapsed >= reporting_interval:
            res_json["timestamp"] = frame_timestamp.isoformat()
            res_json["frame_id"] = int(frameid)
            res_json["inference_fps"] = frame_count / inf_seconds
            start_time = timeit.default_timer()
            report(res_json, frame)
            frame_count = 0
            inf_seconds = 0.0

    # BUGFIX: release the capture device/file handle (was never released).
    cap.release()
    client.publish(topic=PARAM_TOPIC_NAME, payload="End of the input, exiting...")
    del exec_net
    del plugin
コード例 #33
0
def main():
  """Run SSD detection over the configured input stream, drawing boxes on each
  frame; optionally dump an annotated output video and a COCO-style JSON of
  all predictions.

  Raises SystemExit(1) when the CPU plugin lacks support for some layers.
  """
  log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
  args = build_argparser().parse_args()
  model_xml = args.model
  model_bin = os.path.splitext(model_xml)[0] + ".bin"
  # Plugin initialization for specified device and load extensions library if specified
  log.info("Initializing plugin for {} device...".format(args.device))
  plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
  if args.cpu_extension and 'CPU' in args.device:
    plugin.add_cpu_extension(args.cpu_extension)

  # Read IR
  log.info("Reading IR...")
  net = IENetwork.from_ir(model=model_xml, weights=model_bin)

  if "CPU" in plugin.device:
    # Fail fast when the IR contains layers the CPU plugin cannot execute.
    supported_layers = plugin.get_supported_layers(net)
    not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    if len(not_supported_layers) != 0:
      log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                format(plugin.device, ', '.join(not_supported_layers)))
      log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
      sys.exit(1)
  assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
  assert len(net.outputs) == 1, "Sample supports only single output topologies"
  input_blob = next(iter(net.inputs))
  out_blob = next(iter(net.outputs))
  log.info("Loading IR to the plugin...")
  exec_net = plugin.load(network=net, num_requests=2)
  # Network input geometry: batch, channels, height, width.
  n, c, h, w = net.inputs[input_blob]
  del net

  predictions = []
  data = Input(args.input_type, args.input)
  cur_request_id = 0

  # Fixed output-video parameters.
  fps = 25
  out_width = 640
  out_height = 480
  if args.dump_output_video:
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(args.path_to_output_video, fourcc, fps, (int(out_width), int(out_height)))

  while not data.is_finished():
    frame, img_id = data.get_next_item()
    initial_h, initial_w, channels = frame.shape
    in_frame = cv2.resize(frame, (w, h))
    in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    in_frame = in_frame.reshape((n, c, h, w))

    exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
    if exec_net.requests[cur_request_id].wait(-1) == 0:

      # Parse detection results of the current request
      res = exec_net.requests[cur_request_id].outputs[out_blob]
      coco_detections = []
      for obj in res[0][0]:
        # Draw only objects when probability more than specified threshold
        if obj[2] > args.prob_threshold:
          x1 = float(obj[3] * initial_w)
          y1 = float(obj[4] * initial_h)
          x2 = float(obj[5] * initial_w)
          y2 = float(obj[6] * initial_h)

          # COCO bbox format is [x, y, width, height].
          x_, y_ = round(x1, 1), round(y1, 1)
          w_ = round(x2 - x1, 1)
          h_ = round(y2 - y1, 1)
          class_id = int(obj[1])

          coco_det = {}
          coco_det['image_id'] = img_id
          coco_det['category_id'] = class_id
          coco_det['bbox'] = [x_, y_, w_, h_]
          coco_det['score'] = round(float(obj[2]), 1)
          coco_detections.append(coco_det)

          # Draw box and label\class_id
          cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)
          cv2.putText(frame, str(class_id) + ' ' + str(round(obj[2] * 100, 1)) + ' %', (int(x1), int(y1) - 7),
                      cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
      predictions.extend(coco_detections)

    if args.dump_output_video:
      img_resized = cv2.resize(frame, (out_width, out_height))
      out.write(img_resized)
    if args.show:
      cv2.imshow("Detection Results", frame)
      key = cv2.waitKey(10)
      if key == 27:
        break

  if args.dump_output_video:
    # BUGFIX: release the writer so the container is finalized; without this
    # the dumped video can end up truncated or unplayable.
    out.release()

  if args.dump_predictions_to_json:
    with open(args.output_json_path, 'w') as output_file:
      json.dump(predictions, output_file, sort_keys=True, indent=4)

  cv2.destroyAllWindows()
  del exec_net
  del plugin