def ArrangeNetwork(self):
        log.basicConfig(format="[ %(levelname)s ] %(message)s",
                        level=log.INFO,
                        stream=sys.stdout)
        # Plugin initialization for specified device and load extensions library if specified
        plugin = IEPlugin(device=self.device)
        if self.cpu_extension and 'CPU' in self.device:
            plugin.add_cpu_extension(self.cpu_extension)

        # Read IR
        net = IENetwork.from_ir(model=self.model_xml, weights=self.model_bin)
        assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
        assert len(net.outputs) == 1, "Sample supports only single output topologies"
        self.input_blob = next(iter(net.inputs))

        # Set input size
        self.input_size = net.inputs[self.input_blob].shape[2:]

        # Load network to the plugin
        self.exec_net = plugin.load(network=net)
        del net
        # Warm up the network with a dummy, zero-filled input to avoid first-inference overhead
        self.exec_net.infer(
            inputs={
                self.input_blob:
                np.zeros((1, 3, self.input_size[0], self.input_size[1]))
            })
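# Hedged usage sketch (not part of the original snippet): ArrangeNetwork expects an
# object carrying these attributes; the class name, paths, and device below are
# placeholders, and the module-level imports (log, sys, np, IEPlugin, IENetwork)
# are assumed to be present.
class NetworkRunner:
    ArrangeNetwork = ArrangeNetwork  # reuse the function above as a method

    def __init__(self, model_xml, model_bin, device="CPU", cpu_extension=None):
        self.model_xml = model_xml
        self.model_bin = model_bin
        self.device = device
        self.cpu_extension = cpu_extension

runner = NetworkRunner("model.xml", "model.bin")
runner.ArrangeNetwork()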
Example #2
def load_ir_model(model_xml, device, plugin_dir, cpu_extension):
  model_bin = os.path.splitext(model_xml)[0] + ".bin"

  # initialize plugin
  log.info("Initializing plugin for %s device...", device)
  plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
  if cpu_extension and 'CPU' in device:
    plugin.add_cpu_extension(cpu_extension)

  # read IR
  log.info("Reading IR...")
  net = IENetwork.from_ir(model=model_xml, weights=model_bin)

  if "CPU" in plugin.device:
    supported_layers = plugin.get_supported_layers(net)
    not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    if not_supported_layers:
      log.error("Following layers are not supported by the plugin for specified device %s:\n %s",
                plugin.device, ', '.join(not_supported_layers))
      log.error("Please try to specify cpu extensions library path in sample's command line parameters using "
                "--cpu_extension command line argument")
      sys.exit(1)

  # input / output check
  assert len(net.inputs.keys()) == 1, "LPRNet must have only a single input"
  assert len(net.outputs) == 1, "LPRNet must have only a single output"
  input_blob = next(iter(net.inputs))
  out_blob = next(iter(net.outputs))
  log.info("Loading IR to the plugin...")
  exec_net = plugin.load(network=net, num_requests=2)
  shape = net.inputs[input_blob].shape
  del net

  return exec_net, plugin, input_blob, out_blob, shape
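# Hedged usage sketch (not part of the original); the model path is a placeholder,
# and numpy (np) and logging (log) are assumed to be imported by this module.
def run_dummy_inference():
  exec_net, plugin, input_blob, out_blob, shape = load_ir_model(
      model_xml="lprnet.xml", device="CPU", plugin_dir=None, cpu_extension=None)
  n, c, h, w = shape
  dummy = np.zeros((n, c, h, w), dtype=np.float32)
  # Synchronous inference on the first of the two requests created above
  result = exec_net.infer(inputs={input_blob: dummy})[out_blob]
  log.info("Output shape: %s", result.shape)
  return result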
def prepare_inference_engine():
    """Takes and reads IR(.xml+.bin) from command line,loads device to plugin,
	initializes input and output blobs, and loads the network to the plugin.

	Returns:
	  pointers to the loaded network, input of the network, and output of the network
	"""

    plugin = IEPlugin(device=FLAGS.d, plugin_dirs=FLAGS.plugin_dirs)

    model_xml = FLAGS.m
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    net = IENetwork.from_ir(model=model_xml, weights=model_bin)

    input_blob = next(iter(net.inputs))    # name of the network's input blob
    output_blob = next(iter(net.outputs))  # name of the network's output blob

    net.batch_size = 1  #hardcoding to 1 for now

    exec_net = plugin.load(network=net)

    return exec_net, input_blob, output_blob
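# Hedged usage sketch (not part of the original); the 224x224 input size is a
# placeholder and cv2 is assumed to be imported by this script.
def run_single_image(image_path):
    exec_net, input_blob, output_blob = prepare_inference_engine()
    image = cv2.imread(image_path)
    image = cv2.resize(image, (224, 224))   # real size comes from the network's input shape
    image = image.transpose((2, 0, 1))      # Change data layout from HWC to CHW
    image = image.reshape((1, 3, 224, 224))
    return exec_net.infer(inputs={input_blob: image})[output_blob]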
def main():
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    image = cv2.imread(args.input)
    image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    image = image.reshape((n, c, h, w))
    # Load network to the plugin
    exec_net = plugin.load(network=net)
    del net
    # Start sync inference
    res = exec_net.infer(inputs={input_blob: image})
    top_ind = np.argsort(res[out_blob], axis=1)[0, -args.number_top:][::-1]
    for i in top_ind:
        print("%f #%d" % (res[out_blob][0, i], i))
    del exec_net
    del plugin
Example #5
def load_network(model_full_path, device, extensions, batch=1, new_shape=None):
    from openvino.inference_engine import IENetwork, IEPlugin

    #  Read in Graph file (IR)
    net = IENetwork.from_ir(model=model_full_path + ".xml",
                            weights=model_full_path + ".bin")

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    #  Plugin initialization for specified device and load extensions library if needed
    plugin = IEPlugin(device=device.upper())

    if 'extension library' in extensions[device]:
        plugin.add_cpu_extension(extensions[device]['extension library'])

    if device.upper() == 'MYRIAD':
        # TODO: set to true if you want to use multiple NCS devices
        # plugin.set_config({"VPU_FORCE_RESET": "YES"})
        exec_net = plugin.load(network=net)
    else:
        net.batch_size = batch

        exec_net = plugin.load(network=net)
        # exec_net = plugin.load(network=net, config={'DYN_BATCH_ENABLED': 'YES'})

    del net
    return plugin, exec_net, input_blob, out_blob
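# Illustrative call only (not part of the original); the extensions dict layout
# simply mirrors the 'extension library' lookup above and the paths are placeholders.
extensions = {"cpu": {"extension library": "/opt/intel/openvino/lib/libcpu_extension.so"}}
plugin, exec_net, input_blob, out_blob = load_network(
    model_full_path="/models/face-detection-adas-0001",
    device="cpu",
    extensions=extensions,
    batch=1)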
Example #6
    def __init__(self, model, weights):
        self._model = model
        self._weights = weights

        IE_PLUGINS_PATH = os.getenv("IE_PLUGINS_PATH")
        if not IE_PLUGINS_PATH:
            raise OSError("Inference engine plugin path env not found in the system.")

        plugin = IEPlugin(device="CPU", plugin_dirs=[IE_PLUGINS_PATH])
        if self._check_instruction("avx2"):
            plugin.add_cpu_extension(os.path.join(IE_PLUGINS_PATH, "libcpu_extension_avx2.so"))
        elif self._check_instruction("sse4"):
            plugin.add_cpu_extension(os.path.join(IE_PLUGINS_PATH, "libcpu_extension_sse4.so"))
        else:
            raise Exception("Inference engine requires support for AVX2 or SSE4.")

        network = IENetwork.from_ir(model=self._model, weights=self._weights)
        supported_layers = plugin.get_supported_layers(network)
        not_supported_layers = [l for l in network.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            raise Exception("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ", ".join(not_supported_layers)))

        self._input_blob_name = next(iter(network.inputs))
        self._output_blob_name = next(iter(network.outputs))

        self._net = plugin.load(network=network, num_requests=2)
        input_type = network.inputs[self._input_blob_name]
        self._input_layout = input_type if isinstance(input_type, list) else input_type.shape
Example #7
    def __init__(self, description, weights, interpreter,
            plugins_path=None, device=None, model_dir=None):
        model_dir = model_dir or ''
        if not osp.isfile(description):
            description = osp.join(model_dir, description)
        if not osp.isfile(description):
            raise Exception('Failed to open model description file "%s"' % \
                (description))

        if not osp.isfile(weights):
            weights = osp.join(model_dir, weights)
        if not osp.isfile(weights):
            raise Exception('Failed to open model weights file "%s"' % \
                (weights))

        if not osp.isfile(interpreter):
            interpreter = osp.join(model_dir, interpreter)
        if not osp.isfile(interpreter):
            raise Exception('Failed to open model interpreter script file "%s"' % \
                (interpreter))

        self._interpreter = InterpreterScript(interpreter)

        self._device = device or 'CPU'

        self._ie = IECore()
        if hasattr(self._ie, 'read_network'):
            self._network = self._ie.read_network(description, weights)
        else: # backward compatibility
            from openvino.inference_engine import IENetwork
            self._network = IENetwork.from_ir(description, weights)
        self._check_model_support(self._network, self._device)
        self._load_executable_net()
    def __init__(self, cpu_lib, detector_xml, detection_threshold):
        """
        Initialize openvino detector, load configuration network and weights
        :param cpu_lib:
        :param detector_xml:
        :param detection_threshold:
        """
        # Plugin initialization for specified device and load extensions library if specified
        plugin = IEPlugin(device="CPU")
        plugin.add_cpu_extension(cpu_lib)

        # Read detector IR
        detector_bin = os.path.splitext(detector_xml)[0] + ".bin"
        detector_net = IENetwork.from_ir(model=detector_xml,
                                         weights=detector_bin)

        self.d_in = next(iter(detector_net.inputs))
        self.d_out = next(iter(detector_net.outputs))
        detector_net.batch_size = 1

        # Read and pre-process input images
        self.d_n, self.d_c, self.d_h, self.d_w = detector_net.inputs[
            self.d_in].shape
        self.d_images = np.ndarray(shape=(self.d_n, self.d_c, self.d_h,
                                          self.d_w))

        # Loading models to the plugin
        self.d_exec_net = plugin.load(network=detector_net)

        self.detection_threshold = detection_threshold
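    # Hedged companion sketch (not in the original): run the detector on one BGR
    # frame; the SSD-style output rows [image_id, label, conf, xmin, ymin, xmax, ymax]
    # are an assumption about the loaded model.
    def detect(self, frame):
        in_frame = cv2.resize(frame, (self.d_w, self.d_h)).transpose((2, 0, 1))  # HWC -> CHW
        self.d_images[0] = in_frame
        res = self.d_exec_net.infer(inputs={self.d_in: self.d_images})
        return [obj for obj in res[self.d_out][0][0] if obj[2] > self.detection_threshold]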
    def run_ie_on_dataset(model_xml,
                          model_bin,
                          cpu_extension_path,
                          images_dir,
                          prob_threshold=0.01):
        plugin = IEPlugin(device='CPU')
        plugin.add_cpu_extension(cpu_extension_path)
        net = IENetwork.from_ir(model=model_xml, weights=model_bin)
        assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
        assert len(net.outputs) == 1, "Sample supports only single output topologies"
        input_blob = next(iter(net.inputs))
        out_blob = next(iter(net.outputs))
        exec_net = plugin.load(network=net, num_requests=2)
        num, chs, height, width = net.inputs[input_blob].shape
        del net
        cur_request_id = 0

        detection_data = []
        for image in os.listdir(images_dir):
            im_path = os.path.join(images_dir, image)
            frame = cv2.imread(im_path)
            initial_h, initial_w, _ = frame.shape
            in_frame = cv2.resize(frame, (width, height))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((num, chs, height, width))

            objects_per_image = []
            exec_net.start_async(request_id=cur_request_id,
                                 inputs={input_blob: in_frame})

            if exec_net.requests[cur_request_id].wait(-1) == 0:
                res = exec_net.requests[cur_request_id].outputs[out_blob]
                for obj in res[0][0]:
                    if obj[2] > prob_threshold:
                        xmin = int(obj[3] * initial_w)
                        ymin = int(obj[4] * initial_h)
                        xmax = int(obj[5] * initial_w)
                        ymax = int(obj[6] * initial_h)
                        class_id = int(obj[1])
                        conf = obj[2]
                        objects_per_image.append({
                            'bbox': [xmin, ymin, xmax, ymax],
                            'class_id': class_id,
                            'score': conf
                        })

            det_item = {'image': im_path, 'objects': objects_per_image}
            detection_data.append(det_item)

        del exec_net
        del plugin

        return detection_data
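# Hedged invocation sketch (not part of the original); assumes run_ie_on_dataset is
# reachable at module level, and all paths below are placeholders.
detections = run_ie_on_dataset(model_xml="model.xml",
                               model_bin="model.bin",
                               cpu_extension_path="libcpu_extension.so",
                               images_dir="./val_images",
                               prob_threshold=0.4)
for item in detections:
    print(item['image'], len(item['objects']), "objects detected")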
Example #10
def main():
    args = build_argparser().parse_args()
    assert args.device.split(':')[0] == "HETERO", "This sample supports only Hetero Plugin. " \
                                                  "Please specify correct device, e.g. HETERO:FPGA,CPU"
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)

    layers = net.get_layers()
    net_ops = set([l['type'] for l in layers.values()])
    if not any(op in net_ops for op in ("Convolution", "Concat")):
        print("Specified IR doesn't contain any Convolution or Concat operations for which affinity going to be set.\n"
              "Try to use another topology to make the affinity setting result more visible.")

    # Configure the plugin to initialize default affinity for network in set_initial_affinity() function.
    plugin.set_config({"TARGET_FALLBACK": args.device.split(':')[1]})
    # Enable graph visualization
    plugin.set_config({"HETERO_DUMP_GRAPH_DOT": "YES"})
    plugin.set_initial_affinity(net)

    net.set_affinity(types_affinity_map={"Convolution": "GPU", "Concat": "CPU"})
    # Affinity setting example based on layer name.
    # layers_affinity_map has higher priority and will override the affinity set by layer type.
    # net.set_affinity(types_affinity_map={"Convolution": "GPU", "Concat": "CPU"},
    #                  layers_affinity_map={"fire4/expand3x3/Conv2D": "CPU"})

    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    image = cv2.imread(args.input)
    image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    image = image.reshape((n, c, h, w))
    # Load network to the plugin
    exec_net = plugin.load(network=net)
    del net
    # Start sync inference
    res = exec_net.infer(inputs={input_blob: image})
    top_ind = np.argsort(res[out_blob], axis=1)[0, -args.number_top:][::-1]
    for i in top_ind:
        print("%f #%d" % (res[out_blob][0, i], i))
    del exec_net
    del plugin
    cwd = os.getcwd()
    print(
        "Graphs representing default and resulting affinities dumped to {} and {} files respectively"
            .format(os.path.join(cwd, 'hetero_affinity.dot'), os.path.join(cwd, 'hetero_subgraphs.dot'))
    )
Example #11
    def __init__(self,
                 cpu_lib="/opt/intel/openvino_2019.3.376/deployment_tools/inference_engine/lib/intel64/libcpu_extension_avx2.so",
                 landmarks_xml="openvino_detectors/landmarks-regression/FP32/model.xml",
                 features_xml="openvino_detectors/face-reidentification/FP32/model.xml"):

        # Plugin initialization for specified device and load extensions library if specified
        plugin = IEPlugin(device="CPU")
        plugin.add_cpu_extension(cpu_lib)

        # Read landmarks IR
        landmarks_bin = os.path.splitext(landmarks_xml)[0] + ".bin"
        log.info("Loading landmarks network files:\n\t{}\n\t{}".format(landmarks_xml, landmarks_bin))
        landmarks_net = IENetwork.from_ir(model=landmarks_xml, weights=landmarks_bin)

        # Read features IR
        features_bin = os.path.splitext(features_xml)[0] + ".bin"
        log.info("Loading features network files:\n\t{}\n\t{}".format(features_xml, features_bin))
        features_net = IENetwork.from_ir(model=features_xml, weights=features_bin)
        self.l_in = next(iter(landmarks_net.inputs))
        self.l_out = next(iter(landmarks_net.outputs))
        landmarks_net.batch_size = 1

        self.f_in = next(iter(features_net.inputs))
        self.f_out = next(iter(features_net.outputs))
        features_net.batch_size = 1
        cur = landmarks_net.inputs[self.l_in]
        self.l_n = cur.layout  # layout string (e.g. "NCHW"); the batch size here is 1
        self.l_c, self.l_h, self.l_w = cur.shape[1:]
        self.l_images = np.ndarray(shape=(1, self.l_c, self.l_h, self.l_w))

        cur = features_net.inputs[self.f_in]
        self.f_n = cur.layout
        self.f_c, self.f_h, self.f_w = cur.shape[1:]

        self.f_images = np.ndarray(shape=(1, self.f_c, self.f_h, self.f_w))

        # Loading models to the plugin
        log.info("Loading models to the plugin")
        self.l_exec_net = plugin.load(network=landmarks_net)
        self.f_exec_net = plugin.load(network=features_net)

        self.face_aligner = FaceAligner(face_width=self.f_w, face_height=self.f_h)
        self.vectors = {}
    def __init__(self, model_name, device='CPU'):
        model_xml = model_name + '.xml'
        model_bin = model_name + '.bin'

        plugin = IEPlugin(device=device)
        net = IENetwork.from_ir(model=model_xml, weights=model_bin)

        self.input_blob = next(iter(net.inputs))
        self.out_blob = next(iter(net.outputs))

        self.exec_net = plugin.load(network=net)
Example #13
def greengrass_classification_sample_run():
    client.publish(topic=topic_name, payload="OpenVINO: Initializing...")
    model_bin = os.path.splitext(PARAM_MODEL_XML)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=PARAM_DEVICE, plugin_dirs="")
    if "CPU" in PARAM_DEVICE:
        plugin.add_cpu_extension(PARAM_CPU_EXTENSION_PATH)
    # Read IR
    net = IENetwork.from_ir(model=PARAM_MODEL_XML, weights=model_bin)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    cap = cv2.VideoCapture(PARAM_INPUT_SOURCE)
    exec_net = plugin.load(network=net)
    del net
    client.publish(topic=topic_name,
                   payload="Starting inference on %s" % PARAM_INPUT_SOURCE)
    start_time = timeit.default_timer()
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.resize(frame, (w, h))
        in_frame = frame.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))

        # Start sync inference
        res = exec_net.infer(inputs={input_blob: in_frame})
        top_ind = np.argsort(res[out_blob],
                             axis=1)[0, -PARAM_NUM_TOP_RESULTS:][::-1]
        res_json = []
        for i in top_ind:
            res_json.append({
                "confidence": round(res[out_blob][0, i], 2),
                "label": i
            })

        # Measure elapsed seconds since the last report
        seconds_elapsed = timeit.default_timer() - start_time
        if seconds_elapsed >= reporting_interval:
            report(res_json, frame)
            start_time = timeit.default_timer()

    client.publish(topic=topic_name, payload="End of the input, exiting...")
    del exec_net
    del plugin
Example #14
def main():
    # line for log configuration
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    # parser for the arguments
    args = build_argparser().parse_args()
    # get xml model argument
    model_xml = args.model
    model2_xml = "C:\Intel\computer_vision_sdk_2018.4.420\deployment_tools\intel_models\emotions-recognition-retail-0003\FP32\emotions-recognition-retail-0003.xml"
    #model2_xml = "C:\Intel\computer_vision_sdk_2018.4.420\deployment_tools\intel_models\head-pose-estimation-adas-0001\FP32\head-pose-estimation-adas-0001.xml"
    #model2_xml = "C:\Intel\computer_vision_sdk_2018.4.420\deployment_tools\intel_models/age-gender-recognition-retail-0013\FP32/age-gender-recognition-retail-0013.xml"
    #model2_xml = "C:\Intel\computer_vision_sdk_2018.4.420\deployment_tools\intel_models/facial-landmarks-35-adas-0001\FP32/facial-landmarks-35-adas-0001.xml"
    # get weight model argument
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    model2_bin = os.path.splitext(model2_xml)[0] + ".bin"
    # Hardware plugin initialization for specified device and
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    # load extensions library if specified
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read intermediate representation of the model
    log.info("Reading IR...")
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    net2 = IENetwork.from_ir(model=model2_xml, weights=model2_bin)
    # check if the model is supported
    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    # check if the input and output of model is the right format, here we expect just one input (one image) and one output type (bounding boxes)
    assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) <= 3, "Demo supports topologies with at most three outputs"
    # start the iterator on the input nodes
    input_blob = next(iter(net.inputs))
    input_blob2 = next(iter(net2.inputs))
Example #15
 def build(cls, model_xml, model_bin):
     plugin = IEPlugin(device=DEVICE, plugin_dirs=PLUGIN_DIR)
     if CPU_EXTENSION and 'CPU' in DEVICE:
         plugin.add_cpu_extension(CPU_EXTENSION)
     net = IENetwork.from_ir(model=model_xml, weights=model_bin)
     inputs = net.inputs
     batch_size = list(inputs.values())[0].shape[0]
     outputs = net.outputs
     exec_net = plugin.load(network=net, num_requests=batch_size)
     ir_engine = cls(model_xml=model_xml,
                     model_bin=model_bin,
                     exec_net=exec_net,
                     inputs=inputs,
                     outputs=outputs)
     return ir_engine
Example #16
 def load_openvino_reid(self):
     try:
         ie = IECore()
         net = IENetwork.from_ir(model=self.model_configfile_reid,
                                 weights=self.model_modelfile_reid)
         self.ov_input_blob_reid = next(iter(net.inputs))
         self.out_blob_reid = next(iter(net.outputs))
         self.net_reid = ie.load_network(network=net,
                                         num_requests=2,
                                         device_name="CPU")
         # Read and pre-process input image
         self.ov_n_reid, self.ov_c_reid, self.ov_h_reid, self.ov_w_reid = net.inputs[
             self.ov_input_blob_reid].shape
         del net
     except Exception as e:
         raise Exception(f"Load Openvino reidentification error:{e}")
    def __init__(self, model_path=DEFAULT_MODEL_PATH):
        """Set up model.

        Args:
            model_path (str): Path to model.

        """
        self.path = model_path
        plugin = IEPlugin("CPU")
        net = IENetwork.from_ir(model="sn.xml", weights="sn.bin")
        assert len(net.inputs.keys()) == 1
        assert len(net.outputs) == 1 
        self.input_blob = next(iter(net.inputs))
        # out_blob = next(iter(net.outputs))
        self.exec_net = plugin.load(network=net)
        del net
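    # Hedged companion method (not in the original snippet): runs the loaded
    # network on a single preprocessed NCHW array and returns the raw output.
    def predict(self, blob):
        res = self.exec_net.infer(inputs={self.input_blob: blob})
        return next(iter(res.values()))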
 def build(cls, model_xml, model_bin, mapping_config):
     plugin = IEPlugin(device=DEVICE, plugin_dirs=PLUGIN_DIR)
     if CPU_EXTENSION and 'CPU' in DEVICE:
         plugin.add_cpu_extension(CPU_EXTENSION)
     net = IENetwork.from_ir(model=model_xml, weights=model_bin)
     input_blob = next(iter(net.inputs))
     batch_size = net.inputs[input_blob].shape[0]
     inputs = net.inputs
     outputs = net.outputs
     exec_net = plugin.load(network=net, num_requests=batch_size)
     ir_engine = cls(model_xml=model_xml,
                     model_bin=model_bin,
                     mapping_config=mapping_config,
                     exec_net=exec_net,
                     inputs=inputs,
                     outputs=outputs)
     return ir_engine
def setup_openvino():
    app.logger.info('in setup_openvino')
    args = {}
    args['model'] = app.config['model']
    args['device'] = app.config['device']
    args['cpu_extension'] = app.config['cpu_extension']
    args['plugin_dir'] = app.config['plugin_dir']

    app.logger.info("Using model: {}".format(args['model']))
    model_xml = args['model']
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    app.logger.info("Initializing plugin for {} device...".format(
        args['device']))

    plugin = IEPlugin(device=args['device'], plugin_dirs=args['plugin_dir'])
    if plugin is not None:
        app.logger.info("plugin successfully initialized")
    else:
        app.logger.error("error initializing plugin")

    if args['cpu_extension'] and 'CPU' in args['device']:
        plugin.add_cpu_extension(args['cpu_extension'])
    # Read IR
    print("Reading IR...")
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    print("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=1)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    del net
    cache['exec_net'] = exec_net
    cache['n'] = n
    cache['c'] = c
    cache['h'] = h
    cache['w'] = w
    cache['input_blob'] = input_blob
    cache['out_blob'] = out_blob
    cache['plugin'] = plugin
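# Hedged sketch (not part of the original) of how a request handler might use the
# cached objects; the helper name and BGR/NCHW preprocessing are assumptions.
def infer_from_image(image):
    n, c, h, w = cache['n'], cache['c'], cache['h'], cache['w']
    in_frame = cv2.resize(image, (w, h)).transpose((2, 0, 1)).reshape((n, c, h, w))
    res = cache['exec_net'].infer(inputs={cache['input_blob']: in_frame})
    return res[cache['out_blob']]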
    def infer_init(self):

        plugin = IEPlugin(device='MYRIAD')

        net = IENetwork.from_ir(model=self.model_xml_path,
                                weights=self.model_bin_path)

        assert len(net.inputs.keys()) == 1
        assert len(net.outputs) == 1

        self.input_blob = next(iter(net.inputs))
        # self.input_blob = 'input'
        self.out_blob = next(iter(net.outputs))

        self.exec_net = plugin.load(network=net)
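    # Hedged companion sketch (not in the original): synchronous inference on one
    # BGR frame using the objects stored by infer_init; a 3-channel NCHW input is assumed.
    def infer_frame(self, frame, input_size):
        h, w = input_size
        in_frame = cv2.resize(frame, (w, h)).transpose((2, 0, 1)).reshape((1, 3, h, w))
        return self.exec_net.infer(inputs={self.input_blob: in_frame})[self.out_blob]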
  def run_ie_on_dataset(model_xml, model_bin, cpu_extension_path, images_dir, prob_threshold=0.01):
    plugin = IEPlugin(device='CPU')
    plugin.add_cpu_extension(cpu_extension_path)
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    exec_net = plugin.load(network=net, num_requests=2)
    num, chs, height, width = net.inputs[input_blob].shape
    del net
    cur_request_id = 0

    detection_data = []
    for image in os.listdir(images_dir):
      im_path = os.path.join(images_dir, image)
      frame = cv2.imread(im_path)
      initial_h, initial_w, _ = frame.shape
      in_frame = cv2.resize(frame, (width, height))
      in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
      in_frame = in_frame.reshape((num, chs, height, width))

      objects_per_image = []
      exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})

      if exec_net.requests[cur_request_id].wait(-1) == 0:
        res = exec_net.requests[cur_request_id].outputs[out_blob]
        for obj in res[0][0]:
          if obj[2] > prob_threshold:
            xmin = int(obj[3] * initial_w)
            ymin = int(obj[4] * initial_h)
            xmax = int(obj[5] * initial_w)
            ymax = int(obj[6] * initial_h)
            class_id = int(obj[1])
            conf = obj[2]
            objects_per_image.append({'bbox': [xmin, ymin, xmax, ymax], 'class_id': class_id, 'score': conf})

      det_item = {'image': im_path, 'objects': objects_per_image}
      detection_data.append(det_item)

    del exec_net
    del plugin

    return detection_data
Example #22
    def __init__(self, jid, data):
        self.__threshold = data["threshold"]
        self.__max_distance = data["maxDistance"]
        self.__frame_urls = {}
        self.__frame_boxes = {}

        db_job = Job.objects.select_related('segment__task').get(pk=jid)
        db_segment = db_job.segment
        db_task = db_segment.task

        self.__stop_frame = db_segment.stop_frame

        for root, _, filenames in os.walk(db_task.get_data_dirname()):
            for filename in fnmatch.filter(filenames, '*.jpg'):
                frame = int(os.path.splitext(filename)[0])
                if frame >= db_segment.start_frame and frame <= db_segment.stop_frame:
                    self.__frame_urls[frame] = os.path.join(root, filename)

        for frame in self.__frame_urls:
            self.__frame_boxes[frame] = [
                box for box in data["boxes"] if box["frame"] == frame
            ]

        IE_PLUGINS_PATH = os.getenv('IE_PLUGINS_PATH', None)
        REID_MODEL_DIR = os.getenv('REID_MODEL_DIR', None)

        if not IE_PLUGINS_PATH:
            raise Exception(
                "Environment variable 'IE_PLUGINS_PATH' isn't defined")
        if not REID_MODEL_DIR:
            raise Exception(
                "Environment variable 'REID_MODEL_DIR' isn't defined")

        REID_XML = os.path.join(REID_MODEL_DIR, "reid.xml")
        REID_BIN = os.path.join(REID_MODEL_DIR, "reid.bin")

        self.__plugin = IEPlugin(device="CPU", plugin_dirs=[IE_PLUGINS_PATH])
        network = IENetwork.from_ir(model=REID_XML, weights=REID_BIN)
        self.__input_blob_name = next(iter(network.inputs))
        self.__output_blob_name = next(iter(network.outputs))
        self.__input_height, self.__input_width = network.inputs[
            self.__input_blob_name].shape[-2:]
        self.__executable_network = self.__plugin.load(network=network)
        del network
Example #23
def main():
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    print("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    print("Reading IR...")
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    print("Loading IR to the plugin...")
    exec_net = plugin.load(network=net)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    img = cv2.imread(args.input)
    img = cv2.resize(img, (w, h))
    img = img.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    img = img.reshape((n, c, h, w))
    res = np.squeeze(exec_net.infer(inputs={input_blob: img})[out_blob],
                     axis=0)
    # Post-process results
    print("Results post-processing...")
    classes_map = np.zeros(shape=(h, w, c), dtype=int)
    for i in range(h):
        for j in range(w):
            if len(res[:, i, j]) == 1:
                pixel_class = int(res[:, i, j])
            else:
                pixel_class = np.argmax(res[:, i, j])
            classes_map[i, j, :] = classes_color_map[min(pixel_class, 20)]
    cv2.imwrite("out.jpg", classes_map)
    print("Result image was saved to  {}".format(
        os.path.join(os.path.dirname(__file__), "out.jpg")))
    del exec_net
    del plugin
Example #24
    def __init__(self, jid, data):
        self.__threshold = data["threshold"]
        self.__max_distance = data["maxDistance"]

        self.__frame_boxes = {}

        db_job = Job.objects.select_related('segment__task').get(pk=jid)
        db_segment = db_job.segment
        db_task = db_segment.task
        self.__frame_iter = itertools.islice(
            FrameProvider(db_task.data).get_frames(
                FrameProvider.Quality.ORIGINAL),
            db_segment.start_frame,
            db_segment.stop_frame + 1,
        )

        self.__stop_frame = db_segment.stop_frame
        for frame in range(db_segment.start_frame, db_segment.stop_frame + 1):
            self.__frame_boxes[frame] = [
                box for box in data["boxes"] if box["frame"] == frame
            ]

        IE_PLUGINS_PATH = os.getenv('IE_PLUGINS_PATH', None)
        REID_MODEL_DIR = os.getenv('REID_MODEL_DIR', None)

        if not IE_PLUGINS_PATH:
            raise Exception(
                "Environment variable 'IE_PLUGINS_PATH' isn't defined")
        if not REID_MODEL_DIR:
            raise Exception(
                "Environment variable 'REID_MODEL_DIR' isn't defined")

        REID_XML = os.path.join(REID_MODEL_DIR, "reid.xml")
        REID_BIN = os.path.join(REID_MODEL_DIR, "reid.bin")

        self.__plugin = IEPlugin(device="CPU", plugin_dirs=[IE_PLUGINS_PATH])
        network = IENetwork.from_ir(model=REID_XML, weights=REID_BIN)
        self.__input_blob_name = next(iter(network.inputs))
        self.__output_blob_name = next(iter(network.outputs))
        self.__input_height, self.__input_width = network.inputs[
            self.__input_blob_name].shape[-2:]
        self.__executable_network = self.__plugin.load(network=network)
        del network
Example #25
def create_model_openvino(name, size, alpha, precision, device, num_shots,
                          grids):
    if name in supported_models:
        weight_path = openvino_prefix + name + "-size" + str(
            int(size)) + "-alpha" + str(float(alpha)) + "-fp" + str(
                int(precision)) + "-ns" + str(int(num_shots)) + ".bin"
        xml_path = openvino_prefix + name + "-size" + str(
            int(size)) + "-alpha" + str(float(alpha)) + "-fp" + str(
                int(precision)) + "-ns" + str(int(num_shots)) + ".xml"

        try:
            plugin = IEPlugin(device, plugin_dirs=None)
        except:
            raise ErrorSignal(openvino_error)

        try:
            network = IENetwork.from_ir(model=xml_path, weights=weight_path)
        except IOError:
            raise ErrorSignal(unknown_model_error)
        except:
            raise ErrorSignal(openvino_error)

        try:
            input_name = next(iter(network.inputs))
            model = plugin.load(network=network)
        except:
            raise ErrorSignal(openvino_error)

        del network

        if name.split('-')[0] == facedetection_prefix:
            return FaceDetectionOpenVINOModel(input_name, model, num_shots,
                                              grids)
        if name.split('-')[0] == genderestimation_prefix:
            return GenderEstimationOpenVINOModel(input_name, model, num_shots,
                                                 grids)
        if name.split('-')[0] == emotionsrecognision_prefix:
            return EmotionsRecognisionOpenVINOModel(input_name, model,
                                                    num_shots, grids)
        if name.split('-')[0] == faceid_prefix:
            return FaceIDOpenVINOModel(input_name, model, num_shots, grids)
    else:
        raise ErrorSignal(model_notsupported_by_openvino)
def load_detection_model():
    global plugin
    global detection_net
    plugin = IEPlugin(device="MYRIAD")
    #########################################################################

    #########################  Load Neural Network  #########################
    #  Read in Graph file (IR)
    detection_net = IENetwork.from_ir(model="face-detection-adas-0001.xml",
                                      weights="face-detection-adas-0001.bin")

    global detection_input_blob
    global detection_out_blob
    detection_input_blob = next(iter(detection_net.inputs))
    detection_out_blob = next(iter(detection_net.outputs))
    #  Load network to the plugin
    global detection_exec_net
    #help(IEPlugin.load)
    detection_exec_net = plugin.load(network=detection_net, num_requests=1)
    del detection_net
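# Hedged usage sketch (not in the original): run the loaded face detector on one
# BGR frame; the 672x384 input size of face-detection-adas-0001 and the SSD-style
# output rows [image_id, label, conf, xmin, ymin, xmax, ymax] are assumptions.
def detect_faces(frame, threshold=0.5):
    in_frame = cv2.resize(frame, (672, 384)).transpose((2, 0, 1)).reshape((1, 3, 384, 672))
    res = detection_exec_net.infer(inputs={detection_input_blob: in_frame})
    return [obj for obj in res[detection_out_blob][0][0] if obj[2] > threshold]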
Example #27
def main():
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    print("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    print("Reading IR...")
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    print("Loading IR to the plugin...")
    exec_net = plugin.load(network=net)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    img = cv2.imread(args.input)
    img = cv2.resize(img, (w, h))
    img = img.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    img = img.reshape((n, c, h, w))
    res = exec_net.infer(inputs={input_blob: img})[out_blob]
    # Post process output
    res = np.squeeze(res, axis=0)  # Remove batch dimension
    # Clip values to [0, 255] range
    res = np.swapaxes(res, 0, 2)
    res = np.swapaxes(res, 0, 1)
    res = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
    res[res < 0] = 0
    res[res > 255] = 255
    cv2.imwrite("out.jpg", res)
    print("Output image was saved to {}".format(
        os.path.join(os.path.dirname(__file__), "out.jpg")))
    # Explicit object deleting required to guarantee that plugin will not be deleted before executable network
    del exec_net
    del plugin
def main():
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    image = cv2.imread(args.input)
    image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    image = image.reshape((n, c, h, w))

    # Load network to the plugin
    exec_net = plugin.load(network=net, num_requests=1)
    del net
    # Start async inference for first req
    infer_request_handle = exec_net.start_async(request_id=0, inputs={input_blob: image})

    # Wait until infer ends. Equivalent to Wait(IInferRequest::WaitMode::RESULT_READY)
    infer_status = infer_request_handle.wait()
    assert infer_status == 0, "Async infer request execution fails"

    # Get output results from the infer request
    res = infer_request_handle.outputs[out_blob]
    top_ind = np.argsort(res, axis=1)[0, -args.number_top:][::-1]
    for i in top_ind:
        print("%f #%d" % (res[0, i], i))
    del exec_net
    del plugin
    def __init__(self):
        # Plugin initialization for specified device and load extensions library if specified
        plugin = IEPlugin(device="CPU")
        plugin.add_cpu_extension('openvino/lib/libcpu_extension_avx2.so')

        # Read detector IR
        detector_bin = 'openvino/model/person-vehicle-bike-detection-crossroad-0078.bin'
        detector_xml = 'openvino/model/person-vehicle-bike-detection-crossroad-0078.xml'
        detector_net = IENetwork.from_ir(model=detector_xml,
                                         weights=detector_bin)

        self.d_in = next(iter(detector_net.inputs))
        self.d_out = next(iter(detector_net.outputs))
        detector_net.batch_size = 1

        # Read and pre-process input images
        self.d_n, self.d_c, self.d_h, self.d_w = detector_net.inputs[self.d_in].shape
        self.d_images = np.ndarray(shape=(self.d_n, self.d_c, self.d_h,
                                          self.d_w))

        # Loading models to the plugin
        self.d_exec_net = plugin.load(network=detector_net)
def load_recognition_model():
    #######################  Device  Initialization  ########################
    #  Reuse the plugin already created in load_detection_model()

    global recognition_net

    #########################################################################

    #########################  Load Neural Network  #########################
    #  Read in Graph file (IR)
    recognition_net = IENetwork.from_ir(
        model="face-reidentification-retail-0095.xml",
        weights="face-reidentification-retail-0095.bin")

    global input_blob
    global out_blob
    input_blob = next(iter(recognition_net.inputs))
    out_blob = next(iter(recognition_net.outputs))
    #  Load network to the plugin
    global exec_net
    exec_net = plugin.load(network=recognition_net, num_requests=1)
    del recognition_net
Example #31
    def __init__(self, device, getFrameFunc):
        self.getFrameFunc = getFrameFunc
        self.originFrame = None
        self.processedFrame = None
        ############### Model name: specify the MobileNet image-classification xml and bin files ##############
        model_xml = './models/mobilenetv2.xml'
        model_bin = './models/mobilenetv2.bin'

        cpu_extension = None  # '/opt/intel/openvino/inference_engine/lib/intel64/libcpu_extension_sse4.so'
        ############### Label file: specify the label file ################################################
        with open('./models/mobilenetv2.txt', 'rt', encoding='utf-8') as f:
            lines = f.readlines()
        self.labels = list(map(lambda x: x.replace('\n', ''), lines))

        net = IENetwork.from_ir(model=model_xml, weights=model_bin)
        assert len(net.inputs.keys()) == 1
        assert len(net.outputs) == 1

        plugin = IEPlugin(device=device)
        if cpu_extension and 'CPU' in device:
            plugin.add_cpu_extension(cpu_extension)

        self.exec_net = plugin.load(network=net)
        #del net

        self.input_blob = next(iter(net.inputs))
        self.out_blob = next(iter(net.outputs))
        print("Loading IR to the plugin...")
        n, c, self.h, self.w = net.inputs[self.input_blob].shape
        print(n, c, self.h, self.w)

        self.sortedClassifiedList = []
        self.infer_time = 0
        self.inferFPS = 15

        processThread = threading.Thread(target=self.inferenceThread)
        processThread.daemon = True
        processThread.start()
def main():
  log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
  args = build_argparser().parse_args()
  model_xml = args.model
  model_bin = os.path.splitext(model_xml)[0] + ".bin"
  # Plugin initialization for specified device and load extensions library if specified
  log.info("Initializing plugin for {} device...".format(args.device))
  plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
  if args.cpu_extension and 'CPU' in args.device:
    plugin.add_cpu_extension(args.cpu_extension)

  # Read IR
  log.info("Reading IR...")
  net = IENetwork.from_ir(model=model_xml, weights=model_bin)

  if "CPU" in plugin.device:
    supported_layers = plugin.get_supported_layers(net)
    not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    if len(not_supported_layers) != 0:
      log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                format(plugin.device, ', '.join(not_supported_layers)))
      log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
      sys.exit(1)
  assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
  assert len(net.outputs) == 1, "Sample supports only single output topologies"
  input_blob = next(iter(net.inputs))
  out_blob = next(iter(net.outputs))
  log.info("Loading IR to the plugin...")
  exec_net = plugin.load(network=net, num_requests=2)
  # Read and pre-process input image
  n, c, h, w = net.inputs[input_blob].shape
  del net

  predictions = []
  data = Input(args.input_type, args.input)
  cur_request_id = 0

  fps = 25
  out_width = 640
  out_height = 480
  if args.dump_output_video:
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(args.path_to_output_video, fourcc, fps, (int(out_width), int(out_height)))

  while not data.is_finished():
    frame, img_id = data.get_next_item()
    initial_h, initial_w, channels = frame.shape
    in_frame = cv2.resize(frame, (w, h))
    in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    in_frame = in_frame.reshape((n, c, h, w))

    exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
    if exec_net.requests[cur_request_id].wait(-1) == 0:

      # Parse detection results of the current request
      res = exec_net.requests[cur_request_id].outputs[out_blob]
      coco_detections = []
      for obj in res[0][0]:
        # Draw only objects when probability more than specified threshold
        if obj[2] > args.prob_threshold:
          x1 = float(obj[3] * initial_w)
          y1 = float(obj[4] * initial_h)
          x2 = float(obj[5] * initial_w)
          y2 = float(obj[6] * initial_h)

          x_, y_ = round(x1, 1), round(y1, 1)
          w_ = round(x2 - x1, 1)
          h_ = round(y2 - y1, 1)
          class_id = int(obj[1])

          coco_det = {}
          coco_det['image_id'] = img_id
          coco_det['category_id'] = class_id
          coco_det['bbox'] = [x_, y_, w_, h_]
          coco_det['score'] = round(float(obj[2]), 1)
          coco_detections.append(coco_det)

          # Draw box and label\class_id
          cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)
          cv2.putText(frame, str(class_id) + ' ' + str(round(obj[2] * 100, 1)) + ' %', (int(x1), int(y1) - 7),
                      cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
      predictions.extend(coco_detections)

    if args.dump_output_video:
      img_resized = cv2.resize(frame, (out_width, out_height))
      out.write(img_resized)
    if args.show:
      cv2.imshow("Detection Results", frame)
      key = cv2.waitKey(10)
      if key == 27:
        break

  if args.dump_predictions_to_json:
    with open(args.output_json_path, 'w') as output_file:
      json.dump(predictions, output_file, sort_keys=True, indent=4)

  cv2.destroyAllWindows()
  del exec_net
  del plugin