def run_ie_on_dataset(model_xml, model_bin, cpu_extension_path, images_dir, prob_threshold=0.01):
    plugin = IEPlugin(device='CPU')
    plugin.add_cpu_extension(cpu_extension_path)
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    exec_net = plugin.load(network=net, num_requests=2)
    num, chs, height, width = net.inputs[input_blob]  # older IE API: inputs maps names to shape lists
    del net
    cur_request_id = 0

    detection_data = []
    for image in os.listdir(images_dir):
      im_path = os.path.join(images_dir, image)
      frame = cv2.imread(im_path)
      initial_h, initial_w, _ = frame.shape
      in_frame = cv2.resize(frame, (width, height))
      in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
      in_frame = in_frame.reshape((num, chs, height, width))

      objects_per_image = []
      exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})

      if exec_net.requests[cur_request_id].wait(-1) == 0:
        res = exec_net.requests[cur_request_id].outputs[out_blob]
        for obj in res[0][0]:
          if obj[2] > prob_threshold:
            xmin = int(obj[3] * initial_w)
            ymin = int(obj[4] * initial_h)
            xmax = int(obj[5] * initial_w)
            ymax = int(obj[6] * initial_h)
            class_id = int(obj[1])
            conf = obj[2]
            objects_per_image.append({'bbox': [xmin, ymin, xmax, ymax], 'class_id': class_id, 'score': conf})

      det_item = {'image': im_path, 'objects': objects_per_image}
      detection_data.append(det_item)

    del exec_net
    del plugin

    return detection_data
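
The loops above and below all decode the same SSD DetectionOutput layout: a [1, 1, N, 7] blob whose rows are [image_id, class_id, confidence, xmin, ymin, xmax, ymax] with coordinates normalized to [0, 1]. A minimal standalone sketch of that decoding step (the helper name is ours, not from the samples):

def parse_ssd_detections(res, frame_w, frame_h, prob_threshold=0.5):
    # res is the [1, 1, N, 7] DetectionOutput blob; coordinates are
    # normalized, so scale them back by the original frame size
    detections = []
    for obj in res[0][0]:
        if obj[2] > prob_threshold:
            detections.append({
                'class_id': int(obj[1]),
                'score': float(obj[2]),
                'bbox': [int(obj[3] * frame_w), int(obj[4] * frame_h),
                         int(obj[5] * frame_w), int(obj[6] * frame_h)],
            })
    return detections
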
def main():
  log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
  args = build_argparser().parse_args()
  model_xml = args.model
  model_bin = os.path.splitext(model_xml)[0] + ".bin"
  # Plugin initialization for specified device and load extensions library if specified
  log.info("Initializing plugin for {} device...".format(args.device))
  plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
  if args.cpu_extension and 'CPU' in args.device:
    plugin.add_cpu_extension(args.cpu_extension)

  # Read IR
  log.info("Reading IR...")
  net = IENetwork.from_ir(model=model_xml, weights=model_bin)

  if "CPU" in plugin.device:
    supported_layers = plugin.get_supported_layers(net)
    not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    if len(not_supported_layers) != 0:
      log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                format(plugin.device, ', '.join(not_supported_layers)))
      log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
      sys.exit(1)
  assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
  assert len(net.outputs) == 1, "Sample supports only single output topologies"
  input_blob = next(iter(net.inputs))
  out_blob = next(iter(net.outputs))
  log.info("Loading IR to the plugin...")
  exec_net = plugin.load(network=net, num_requests=2)
  # Read and pre-process input image
  n, c, h, w = net.inputs[input_blob]  # older IE API: inputs maps names to shape lists
  del net

  predictions = []
  data = Input(args.input_type, args.input)
  cur_request_id = 0

  fps = 25
  out_width = 640
  out_height = 480
  if args.dump_output_video:
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(args.path_to_output_video, fourcc, fps, (int(out_width), int(out_height)))

  while not data.is_finished():
    frame, img_id = data.get_next_item()
    initial_h, initial_w, channels = frame.shape
    in_frame = cv2.resize(frame, (w, h))
    in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    in_frame = in_frame.reshape((n, c, h, w))

    exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
    if exec_net.requests[cur_request_id].wait(-1) == 0:

      # Parse detection results of the current request
      res = exec_net.requests[cur_request_id].outputs[out_blob]
      coco_detections = []
      for obj in res[0][0]:
        # Keep only objects whose probability exceeds the specified threshold
        if obj[2] > args.prob_threshold:
          x1 = float(obj[3] * initial_w)
          y1 = float(obj[4] * initial_h)
          x2 = float(obj[5] * initial_w)
          y2 = float(obj[6] * initial_h)

          x_, y_ = round(x1, 1), round(y1, 1)
          w_ = round(x2 - x1, 1)
          h_ = round(y2 - y1, 1)
          class_id = int(obj[1])

          coco_det = {}
          coco_det['image_id'] = img_id
          coco_det['category_id'] = class_id
          coco_det['bbox'] = [x_, y_, w_, h_]
          coco_det['score'] = round(float(obj[2]), 1)
          coco_detections.append(coco_det)

          # Draw box and label/class_id
          cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)
          cv2.putText(frame, str(class_id) + ' ' + str(round(obj[2] * 100, 1)) + ' %', (int(x1), int(y1) - 7),
                      cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
      predictions.extend(coco_detections)

    if args.dump_output_video:
      img_resized = cv2.resize(frame, (out_width, out_height))
      out.write(img_resized)
    if args.show:
      cv2.imshow("Detection Results", frame)
      key = cv2.waitKey(10)
      if key == 27:
        break

  if args.dump_predictions_to_json:
    with open(args.output_json_path, 'w') as output_file:
      json.dump(predictions, output_file, sort_keys=True, indent=4)

  cv2.destroyAllWindows()
  del exec_net
  del plugin
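
The COCO records built above use the [x, y, width, height] box convention rather than the corner format the network emits; the conversion as a small sketch (the helper name is ours):

def to_coco_bbox(x1, y1, x2, y2, ndigits=1):
    # COCO bboxes are [top-left x, top-left y, width, height]
    return [round(x1, ndigits), round(y1, ndigits),
            round(x2 - x1, ndigits), round(y2 - y1, ndigits)]
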
Example #3
def main():
    #line for log configuration
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    #parser for the arguments
    args = build_argparser().parse_args()
    #get xml model argument
    model_xml = args.model
    #get weight model argument
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    #Hardware plugin initialization for specified device and
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    #load extensions library if specified
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    #Read intermediate representation of the model
    log.info("Reading IR...")
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    # check if the model is supported
    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in demo's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    # Check the model I/O: here we expect a single input (one image) and a single output type (bounding boxes)
    assert len(
        net.inputs.keys()) == 1, "Demo supports only single input topologies"
    #assert len(net.outputs) == 1, "Demo supports only single output topologies"
    # start the iterator on the input nodes
    input_blob = next(iter(net.inputs))
    print(input_blob)
    # start the iterator on the output
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    # load the network
    exec_net = plugin.load(network=net)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    del net
    #take care of the input data (camera or video file)
    if args.input == 'cam':
        input_stream = 0
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
    # Load the labels file if provided
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    # OpenCV video capture for the input stream
    cap = cv2.VideoCapture(input_stream)

    log.info("Starting inference ")
    log.info("To stop the demo execution press Esc button")

    render_time = 0
    # Prime the capture with a first read
    ret, frame = cap.read()
    #if open, we loop over the incoming frames
    i = 1
    while cap.isOpened():
        #we get the frame
        ret, frame = cap.read()
        if not ret:
            break
        # Get the frame size (prop 3 = width, prop 4 = height)
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        #start the time counter
        inf_start = time.time()
        #reshape the frame size and channels order to fit the model input
        in_frame = cv2.resize(frame, (w, h))
        in_frame = in_frame.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))

        #start the inference
        exec_net.infer(inputs={input_blob: in_frame})

        #stop the clock
        inf_end = time.time()
        det_time = inf_end - inf_start

        # Parse detection results
        res = exec_net.requests[0].outputs[out_blob]

        #iterate over the results
        for obj in res[0][0]:
            # Check the object probability; if above the threshold, draw a bounding box
            if obj[2] > args.prob_threshold:
                # class ID
                class_id = int(obj[1])
                #define top left corner column value
                xmin = int(obj[3] * initial_w)
                #define top left corner row value
                ymin = int(obj[4] * initial_h)
                #define bottom right corner column value
                xmax = int(obj[5] * initial_w)
                #define bottom right corner row value
                ymax = int(obj[6] * initial_h)

                # Widen the box horizontally and shift it down by half its
                # height to crop the region below the detection
                deltax = int((xmax - xmin) / 2)
                deltay = int((ymax - ymin) / 2)
                xmin = xmin - deltax
                xmax = xmax + deltax
                ymin = ymin + deltay
                ymax = ymax + deltay

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                gray = gray[ymin:ymax, xmin:xmax]

                ret, thresh = cv2.threshold(
                    gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

                # Keep crops that are large enough and at least half as tall
                # as they are wide (the original repeated the aspect check twice)
                minSize = 128 * 128
                if (thresh is not None and thresh.size > minSize
                        and thresh.shape[0] > int(thresh.shape[1] / 2)):
                    #print(thresh.shape)
                    i = i + 1
                    # Save every 5th qualifying crop
                    if i % 5 == 0:
                        gray = cv2.resize(gray, (128, 100))
                        #cv2.imshow("littleframe",gray)
                        cv2.imwrite("images3/image" + str(i) + '.png', gray)

                # Draw box and label with class_id
                # color = (min(class_id * 12.5, 255), min(class_id * 7, 255), min(class_id * 5, 255))
                rd = np.random.randint(0, 2)
                color = (rd * 255, rd * 255, rd * 255)  # randomly black or white
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
                det_label = labels_map[class_id] if labels_map else str(
                    class_id)
                cv2.putText(
                    frame,
                    det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %',
                    (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)

            # Draw performance stats
            inf_time_message = "Inference time: {:.3f} ms".format(det_time *
                                                                  1000)

            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)

        # Show the result image (disabled in this variant)
        #cv2.imshow("Detection Results", frame)

        key = cv2.waitKey(1)
        if key == 27:
            break

    cv2.destroyAllWindows()
    del exec_net
    del plugin
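
One pitfall in the crop above: the widened and shifted coordinates can leave the frame, and a negative xmin makes gray[ymin:ymax, xmin:xmax] wrap around. A clamped variant, purely as an illustrative sketch (the clamping is our assumption, not part of the original):

def expand_and_clamp(xmin, ymin, xmax, ymax, frame_w, frame_h):
    # Widen horizontally and shift down as above, but keep the crop in-frame
    dx = (xmax - xmin) // 2
    dy = (ymax - ymin) // 2
    xmin = max(0, xmin - dx)
    xmax = min(frame_w, xmax + dx)
    ymin = min(frame_h, ymin + dy)
    ymax = min(frame_h, ymax + dy)
    return xmin, ymin, xmax, ymax
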
Example #4
def main():
    args = build_argparser().parse_args()

    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # ------------- 1. Plugin initialization for specified device and load extensions library if specified -------------
    log.info("Creating Inference Engine...")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")

    # -------------------- 2. Reading the IR generated by the Model Optimizer (.xml and .bin files) --------------------
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    # ---------------------------------- 3. Load CPU extension for support specific layer ------------------------------
    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(args.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys(
    )) == 1, "Sample supports only YOLO V3 based single input topologies"

    # ---------------------------------------------- 4. Preparing inputs -----------------------------------------------
    log.info("Preparing inputs")
    input_blob = next(iter(net.inputs))

    # Default batch_size is 1
    net.batch_size = 1

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    input_stream = 0 if args.input == "cam" else args.input

    is_async_mode = True
    cap = cv2.VideoCapture(input_stream)
    number_input_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Some sources report a bogus negative frame count; treat it as a single frame
    number_input_frames = 1 if number_input_frames != -1 and number_input_frames < 0 else number_input_frames

    wait_key_code = 1

    # A single-image input (frame count of 1) would be re-read in a loop, so default to sync mode
    if number_input_frames != 1:
        ret, frame = cap.read()
    else:
        is_async_mode = False
        wait_key_code = 0

    # ----------------------------------------- 5. Loading model to the plugin -----------------------------------------
    log.info("Loading model to the plugin")
    exec_net = ie.load_network(network=net,
                               num_requests=2,
                               device_name=args.device)

    cur_request_id = 0
    next_request_id = 1
    render_time = 0
    parsing_time = 0

    # ----------------------------------------------- 6. Doing inference -----------------------------------------------
    log.info("Starting inference...")
    print(
        "To close the application, press 'CTRL+C' here or switch to the output window and press ESC key"
    )
    print(
        "To switch between sync/async modes, press TAB key in the output window"
    )
    while cap.isOpened():
        # Here is the first asynchronous point: in async mode, we capture a frame to populate the NEXT infer request;
        # in the regular mode, we capture a frame for the CURRENT infer request
        if is_async_mode:
            ret, next_frame = cap.read()
        else:
            ret, frame = cap.read()

        if not ret:
            break

        if is_async_mode:
            request_id = next_request_id
            in_frame = cv2.resize(next_frame, (w, h))
        else:
            request_id = cur_request_id
            in_frame = cv2.resize(frame, (w, h))

        # Convert the resized frame to the network's NCHW layout
        in_frame = in_frame.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))

        # Start inference
        start_time = time()
        exec_net.start_async(request_id=request_id,
                             inputs={input_blob: in_frame})
        det_time = time() - start_time

        # Collecting object detection results
        objects = list()
        if exec_net.requests[cur_request_id].wait(-1) == 0:
            output = exec_net.requests[cur_request_id].outputs
            start_time = time()
            for layer_name, out_blob in output.items():
                out_blob = out_blob.reshape(
                    net.layers[net.layers[layer_name].parents[0]].shape)
                layer_params = YoloParams(net.layers[layer_name].params,
                                          out_blob.shape[2])
                log.info("Layer {} parameters: ".format(layer_name))
                layer_params.log_params()
                objects += parse_yolo_region(out_blob, in_frame.shape[2:],
                                             frame.shape[:-1], layer_params,
                                             args.prob_threshold)
            parsing_time = time() - start_time

        # Filtering overlapping boxes with respect to the --iou_threshold CLI parameter
        objects = sorted(objects,
                         key=lambda obj: obj['confidence'],
                         reverse=True)
        for i in range(len(objects)):
            if objects[i]['confidence'] == 0:
                continue
            for j in range(i + 1, len(objects)):
                if intersection_over_union(objects[i],
                                           objects[j]) > args.iou_threshold:
                    objects[j]['confidence'] = 0

        # Drawing objects with respect to the --prob_threshold CLI parameter
        objects = [
            obj for obj in objects if obj['confidence'] >= args.prob_threshold
        ]

        if len(objects) and args.raw_output_message:
            log.info("\nDetected boxes for batch {}:".format(1))
            log.info(
                " Class ID | Confidence | XMIN | YMIN | XMAX | YMAX | COLOR ")

        origin_im_size = frame.shape[:-1]
        for obj in objects:
            # Validation bbox of detected object
            if obj['xmax'] > origin_im_size[1] or obj['ymax'] > origin_im_size[
                    0] or obj['xmin'] < 0 or obj['ymin'] < 0:
                continue
            color = (int(min(obj['class_id'] * 12.5,
                             255)), min(obj['class_id'] * 7,
                                        255), min(obj['class_id'] * 5, 255))
            det_label = labels_map[obj['class_id']] if labels_map and len(labels_map) >= obj['class_id'] else \
                str(obj['class_id'])

            if args.raw_output_message:
                log.info(
                    "{:^9} | {:10f} | {:4} | {:4} | {:4} | {:4} | {} ".format(
                        det_label, obj['confidence'], obj['xmin'], obj['ymin'],
                        obj['xmax'], obj['ymax'], color))

            cv2.rectangle(frame, (obj['xmin'], obj['ymin']),
                          (obj['xmax'], obj['ymax']), color, 2)
            cv2.putText(
                frame, "#" + det_label + ' ' +
                str(round(obj['confidence'] * 100, 1)) + ' %',
                (obj['xmin'], obj['ymin'] - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6,
                color, 1)

        # Draw performance stats over frame
        inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
            "Inference time: {:.3f} ms".format(det_time * 1e3)
        render_time_message = "OpenCV rendering time: {:.3f} ms".format(
            render_time * 1e3)
        async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
            "Async mode is off. Processing request {}".format(cur_request_id)
        parsing_message = "YOLO parsing time is {:.3f} ms".format(
            parsing_time * 1e3)

        cv2.putText(frame, inf_time_message, (15, 15),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
        cv2.putText(frame, render_time_message, (15, 45),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
        cv2.putText(frame, async_mode_message,
                    (10, int(origin_im_size[0] - 20)),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
        cv2.putText(frame, parsing_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX,
                    0.5, (10, 10, 200), 1)

        start_time = time()
        if not args.no_show:
            cv2.imshow("DetectionResults", frame)
        render_time = time() - start_time

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame

        if not args.no_show:
            key = cv2.waitKey(wait_key_code)

            # ESC key
            if key == 27:
                break
            # Tab key
            if key == 9:
                exec_net.requests[cur_request_id].wait()
                is_async_mode = not is_async_mode
                log.info("Switched to {} mode".format(
                    "async" if is_async_mode else "sync"))

    cv2.destroyAllWindows()
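
This demo calls intersection_over_union() without showing it; a standard definition compatible with the obj dicts above (keys xmin/ymin/xmax/ymax) would look like this sketch:

def intersection_over_union(box_1, box_2):
    # Overlap rectangle; negative extents mean the boxes are disjoint
    width_of_overlap = min(box_1['xmax'], box_2['xmax']) - max(box_1['xmin'], box_2['xmin'])
    height_of_overlap = min(box_1['ymax'], box_2['ymax']) - max(box_1['ymin'], box_2['ymin'])
    if width_of_overlap < 0 or height_of_overlap < 0:
        return 0.0
    area_of_overlap = width_of_overlap * height_of_overlap
    area_1 = (box_1['xmax'] - box_1['xmin']) * (box_1['ymax'] - box_1['ymin'])
    area_2 = (box_2['xmax'] - box_2['xmin']) * (box_2['ymax'] - box_2['ymin'])
    area_of_union = area_1 + area_2 - area_of_overlap
    return area_of_overlap / area_of_union if area_of_union else 0.0
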
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    frame_counter = 0
    frame_waiter = -1

    #for tracking movement
    coordlist = []

    log.info("Creating Inference Engine...")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(args.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    img_info_input_blob = None
    feed_dict = {}
    for blob_name in net.inputs:
        if len(net.inputs[blob_name].shape) == 4:
            input_blob = blob_name
        elif len(net.inputs[blob_name].shape) == 2:
            img_info_input_blob = blob_name
        else:
            raise RuntimeError(
                "Unsupported {}D input layer '{}'. Only 2D and 4D input layers are supported"
                .format(len(net.inputs[blob_name].shape), blob_name))

    assert len(net.outputs) == 1, "Demo supports only single output topologies"

    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = ie.load_network(network=net,
                               num_requests=2,
                               device_name=args.device)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    if img_info_input_blob:
        feed_dict[img_info_input_blob] = [h, w, 1]

    #for tracking movement in 3d
    # Create a blank white canvas (2h x w) for plotting tracked positions
    image_tracking = np.zeros((h * 2, w, 3), np.uint8)
    image_tracking[:] = (255, 255, 255)

    if args.input == 'cam':
        input_stream = 0
    else:
        input_stream = args.input
    cap = cv2.VideoCapture(input_stream)
    assert cap.isOpened(), "Can't open {}".format(input_stream)

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    is_async_mode = True
    render_time = 0
    if is_async_mode:
        ret, frame = cap.read()
        frame_h, frame_w = frame.shape[:2]

    print(
        "To close the application, press 'CTRL+C' here or switch to the output window and press ESC key"
    )
    print(
        "To switch between sync/async modes, press TAB key in the output window"
    )

    while cap.isOpened():
        if is_async_mode:
            ret, next_frame = cap.read()
        else:
            ret, frame = cap.read()
            if ret:
                frame_h, frame_w = frame.shape[:2]
        if not ret:
            break  # abandons the last frame in case of async_mode
        # Main sync point:
        # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
        # in the regular mode we start the CURRENT request and immediately wait for its completion
        inf_start = time.time()
        if is_async_mode:
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            feed_dict[input_blob] = in_frame
            exec_net.start_async(request_id=next_request_id, inputs=feed_dict)
        else:
            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            feed_dict[input_blob] = in_frame
            exec_net.start_async(request_id=cur_request_id, inputs=feed_dict)
        if exec_net.requests[cur_request_id].wait(-1) == 0:
            inf_end = time.time()
            det_time = inf_end - inf_start

            # Parse detection results of the current request
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            n_ppl = 0
            for obj in res[0][0]:
                # Keep only objects whose probability exceeds the specified threshold
                if obj[2] > args.prob_threshold:
                    n_ppl = n_ppl + 1
                    xmin = int(obj[3] * frame_w)
                    ymin = int(obj[4] * frame_h)
                    xmax = int(obj[5] * frame_w)
                    ymax = int(obj[6] * frame_h)
                    class_id = int(obj[1])
                    # Draw box and label/class_id
                    color = (min(class_id * 12.5,
                                 255), min(class_id * 7,
                                           255), min(class_id * 5, 255))
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
                    det_label = labels_map[class_id] if labels_map else str(
                        class_id)
                    cv2.putText(
                        frame,
                        det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %',
                        (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color,
                        1)
                    x_track = round(xmin + abs(xmax - xmin) / 2)
                    y_track = round(ymax)
                    # Use the box height as a crude depth proxy
                    depth = round(ymax - ymin)
                    coordlist += [(x_track, y_track, depth)]

            # Draw performance stats
            # inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
            #     "Inference time: {:.3f} ms".format(det_time * 1000)
            # render_time_message = "OpenCV rendering time: {:.3f} ms".format(render_time * 1000)
            # async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
            #     "Async mode is off. Processing request {}".format(cur_request_id)
            #
            # cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            # cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            # cv2.putText(frame, async_mode_message, (10, int(frame_h - 20)), cv2.FONT_HERSHEY_COMPLEX, 0.5,
            #             (10, 10, 200), 1)

            #new: adding the tracking spots
            color_blue = (255, 0, 0)
            for coord in coordlist:
                new_depth = round(((coord[2]) / 100)**2.5)
                frame = cv2.circle(frame, (coord[0], coord[1]),
                                   new_depth,
                                   color_blue,
                                   thickness=2)
                image_tracking = cv2.circle(image_tracking,
                                            (coord[0], round(coord[2] * 2)),
                                            3,
                                            color_blue,
                                            thickness=1)

            # adding text
            ppl_message = "People detected: {}".format(n_ppl)
            cv2.putText(frame, ppl_message, (30, round(frame_h - 50)),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)

            ppl_message_alert = "!!OVERCROWDING!!"

            frame_counter = frame_counter + 1
            frame_waiter = frame_waiter - 1
            if (n_ppl > 7 and frame_waiter < 0):
                frame_waiter = 100
                cv2.putText(frame, ppl_message_alert,
                            (100, round(frame_h / 2)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 255), 3)

                add_data.send_data(n_ppl, 7)

        #
        render_start = time.time()
        if not args.no_show:
            cv2.imshow("Detection Results", frame)
        render_end = time.time()
        render_time = render_end - render_start

        cv2.imshow('movement tracking', image_tracking)

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame
            frame_h, frame_w = frame.shape[:2]

        if not args.no_show:
            key = cv2.waitKey(1)
            if key == 27:
                break
            if (9 == key):
                is_async_mode = not is_async_mode
                log.info("Switched to {} mode".format(
                    "async" if is_async_mode else "sync"))

    cv2.destroyAllWindows()
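
The frame_waiter counter above implements a simple alert cooldown: once overcrowding fires, it is suppressed for the next 100 frames. The same pattern as a small reusable sketch (the class name is ours, not from the demo):

class FrameCooldown:
    # Fire at most once per `cooldown` frames
    def __init__(self, cooldown=100):
        self.cooldown = cooldown
        self.wait = -1

    def try_fire(self):
        self.wait -= 1
        if self.wait < 0:
            self.wait = self.cooldown
            return True
        return False
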
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        log.info("Loading plugins for {} device...".format(args.device))
        plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)
    # Read and pre-process input image
    if isinstance(net.inputs[input_blob], list):
        n, c, h, w = net.inputs[input_blob]
    else:
        n, c, h, w = net.inputs[input_blob].shape
    del net
    if args.input == 'cam':
        input_stream = 0
        out_file_name = 'cam'
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
        out_file_name = os.path.splitext(os.path.basename(args.input))[0]

    if args.output_dir:
        out_path = os.path.join(args.output_dir, out_file_name + '.mp4')

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cap = cv2.VideoCapture(input_stream)
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the sample execution press Esc button")
    job_id = os.environ['PBS_JOBID']
    result_file = open(
        os.path.join(args.output_dir, 'output_' + str(job_id) + '.txt'), "w")
    progress_file_path = os.path.join(args.output_dir,
                                      'i_progress_' + str(job_id) + '.txt')

    is_async_mode = True
    render_time = 0
    fps_sum = 0
    frame_count = 0
    inf_list = []
    res_list = []
    try:
        infer_time_start = time.time()
        while cap.isOpened():
            read_time = time.time()
            ret, frame = cap.read()
            if not ret:
                break
            initial_w = cap.get(3)
            initial_h = cap.get(4)

            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            # Main sync point:
            # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
            # in the regular mode we start the CURRENT request and immediately wait for its completion
            inf_start = time.time()
            if is_async_mode:
                exec_net.start_async(request_id=next_request_id,
                                     inputs={input_blob: in_frame})
            else:
                exec_net.start_async(request_id=cur_request_id,
                                     inputs={input_blob: in_frame})

            if exec_net.requests[cur_request_id].wait(-1) == 0:
                inf_end = time.time()
                det_time = inf_end - inf_start
                #Parse detection results of the current request
                res = exec_net.requests[cur_request_id].outputs[out_blob]
                processBoxes(frame_count, res, labels_map, args.prob_threshold,
                             frame, initial_w, initial_h, result_file,
                             det_time)

            #
            frame_count += 1
            #Write data to progress tracker
            if frame_count % 10 == 0:
                progressUpdate(progress_file_path,
                               time.time() - infer_time_start, frame_count,
                               video_len)

            key = cv2.waitKey(1)
            if key == 27:
                break
            if (9 == key):
                is_async_mode = not is_async_mode
                log.info("Switched to {} mode".format(
                    "async" if is_async_mode else "sync"))
            if is_async_mode:
                cur_request_id, next_request_id = next_request_id, cur_request_id

        # End of while loop
        cap.release()
        result_file.close()

        if args.output_dir is None:
            cv2.destroyAllWindows()
        else:
            total_time = time.time() - infer_time_start
            with open(
                    os.path.join(args.output_dir,
                                 'stats_' + str(job_id) + '.txt'), 'w') as f:
                f.write(str(round(total_time, 1)) + '\n')
                f.write(str(frame_count) + '\n')

    finally:
        del exec_net
        del plugin
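
progressUpdate() is not shown in this sample; a minimal sketch consistent with how it is called here (progress file path, elapsed seconds, frames processed, total frames) might write a percentage and a remaining-time estimate:

def progressUpdate(file_path, elapsed, frame_count, total_frames):
    # Rough progress report: percent done, elapsed seconds, estimated remainder
    progress = round(100 * frame_count / total_frames, 1)
    remaining = round(elapsed / max(frame_count, 1) * (total_frames - frame_count), 1)
    with open(file_path, 'w') as f:
        f.write('{}\n{}\n{}\n'.format(progress, round(elapsed, 1), remaining))
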
def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()

    input_images = list_input_images(args.input)

    # Loading network using ngraph function
    ngraph_function = create_ngraph_function(args)
    net = IENetwork(Function.to_capsule(ngraph_function))

    assert len(net.input_info.keys()
               ) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.input_info))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(input_images)

    # Read and pre-process input images
    n, c, h, w = net.input_info[input_blob].input_data.shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = read_image(input_images[i])
        assert image is not None, log.error("Can't open an image {}".format(
            input_images[i]))
        assert len(image.shape) == 2, log.error(
            'Sample supports images with 1 channel only')
        # A grayscale image's shape is (rows, cols), i.e. (h, w)
        if image.shape[:] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(
                input_images[i], image.shape[:], (w, h)))
            image = cv2.resize(image, (w, h))
        images[i] = image
    log.info("Batch size is {}".format(n))

    log.info("Creating Inference Engine")
    ie = IECore()

    log.info('Loading model to the device')
    exec_net = ie.load_network(network=net, device_name=args.device.upper())

    # Start sync inference
    log.info('Creating infer request and starting inference')
    res = exec_net.infer(inputs={input_blob: images})

    # Processing results
    log.info("Processing output blob")
    res = res[out_blob]
    log.info("Top {} results: ".format(args.number_top))

    # Read labels file if it is provided as argument
    labels_map = None
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]

    classid_str = "classid"
    probability_str = "probability"
    for i, probs in enumerate(res):
        probs = np.squeeze(probs)
        top_ind = np.argsort(probs)[-args.number_top:][::-1]
        print("Image {}\n".format(input_images[i]))
        print(classid_str, probability_str)
        print("{} {}".format('-' * len(classid_str),
                             '-' * len(probability_str)))
        for class_id in top_ind:
            det_label = labels_map[class_id] if labels_map else "{}".format(
                class_id)
            label_length = len(det_label)
            space_num_before = (len(classid_str) - label_length) // 2
            space_num_after = len(classid_str) - (space_num_before +
                                                  label_length) + 2
            space_num_before_prob = (len(probability_str) -
                                     len(str(probs[class_id]))) // 2
            print("{}{}{}{}{:.7f}".format(' ' * space_num_before, det_label,
                                          ' ' * space_num_after,
                                          ' ' * space_num_before_prob,
                                          probs[class_id]))
        print("\n")

    log.info('This sample is an API example, for any performance measurements '
             'please use the dedicated benchmark_app tool')
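
The manual space arithmetic above centers each label and probability under its column header; Python's format alignment can express roughly the same layout in one line (a sketch, not the sample's output verbatim):

# '^' centers within the field width; 9 and 13 approximate the header widths
print("{:^9}{:^13.7f}".format(det_label, probs[class_id]))
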
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(args.input)

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(
                args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    log.info("Batch size is {}".format(n))

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)

    # Start sync inference
    log.info("Starting inference ({} iterations)".format(args.number_iter))
    infer_time = []
    for i in range(args.number_iter):
        t0 = time()
        infer_request_handle = exec_net.start_async(
            request_id=0, inputs={input_blob: images})
        infer_request_handle.wait()
        infer_time.append((time() - t0) * 1000)
    log.info("Average running time of one iteration: {} ms".format(
        np.average(np.asarray(infer_time))))
    if args.perf_counts:
        perf_counts = infer_request_handle.get_perf_counts()
        log.info("Performance counters:")
        print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
            'name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
        for layer, stats in perf_counts.items():
            print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
                layer, stats['layer_type'], stats['exec_type'],
                stats['status'], stats['real_time']))
    # Processing output blob
    log.info("Processing output blob")
    res = infer_request_handle.outputs[out_blob]
    log.info("Top {} results: ".format(args.number_top))
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
    else:
        labels_map = None
    classid_str = "classid"
    probability_str = "probability"
    for i, probs in enumerate(res):
        probs = np.squeeze(probs)
        top_ind = np.argsort(probs)[-args.number_top:][::-1]
        print("Image {}\n".format(args.input[i]))
        print(classid_str, probability_str)
        print("{} {}".format('-' * len(classid_str),
                             '-' * len(probability_str)))
        for class_id in top_ind:
            det_label = labels_map[class_id] if labels_map else "{}".format(
                class_id)
            label_length = len(det_label)
            space_num_before = (7 - label_length) // 2
            space_num_after = 7 - (space_num_before + label_length) + 2
            space_num_before_prob = (11 - len(str(probs[class_id]))) // 2
            print("{}{}{}{}{:.7f}".format(' ' * space_num_before, det_label,
                                          ' ' * space_num_after,
                                          ' ' * space_num_before_prob,
                                          probs[class_id]))
        print("\n")
Example #9
import sys
import time
import random
import numpy
from tqdm import tqdm
import torch
from openvino.inference_engine import IECore, IENetwork
from networks import vgg11s
from netslim import load_pruned_model

# Initialize inference engine core
ie = IECore()

# build network with FP32
model_name = "vgg11s-pr05-FP32"
net = IENetwork(model="{}.xml".format(model_name), weights="{}.bin".format(model_name))
input_name = next(iter(net.inputs))
output_name = next(iter(net.outputs))
exec_net = ie.load_network(network=net, device_name="CPU")
openvino_pr05_fp32_pack = ["vgg11s-pr05-fp32-openvino", exec_net.infer, input_name]

# build network with FP16
model_name = "vgg11s-pr05-FP16"
net = IENetwork(model="{}.xml".format(model_name), weights="{}.bin".format(model_name))
input_name = next(iter(net.inputs))
output_name = next(iter(net.outputs))
exec_net = ie.load_network(network=net, device_name="CPU")
openvino_pr05_fp16_pack = ["vgg11s-pr05-fp16-openvino", exec_net.infer, input_name]

def run_openvino(net, input_name, n):
    # The original snippet is truncated here; this minimal body matches how
    # the packs above are built (`net` is the exec_net.infer callable); the
    # 1x3x32x32 input shape is an assumption
    x = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
    for i in tqdm(range(n)):
        net({input_name: x})
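
A sketch of how the packs above could then be timed (the input shape is an assumption, matching the placeholder body of run_openvino):

for name, infer_fn, in_name in [openvino_pr05_fp32_pack, openvino_pr05_fp16_pack]:
    x = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
    t0 = time.time()
    for _ in range(100):
        infer_fn({in_name: x})
    print("{}: {:.2f} ms/iter".format(name, (time.time() - t0) * 10))
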
Example #10
# ----------------- Load the model and create the inference engine ------------
# -----------------------------------------------------------------------------
log.info('Loading model')

model_xml = args['model']
model_bin = os.path.splitext(model_xml)[0] + '.bin'

log.info(f'... model file {model_xml}')
log.info(f'... weights file {model_bin}')

# -----------------------------------------------------------------------------
# ----------------- Create inference engine -----------------------------------
# -----------------------------------------------------------------------------
log.info('Creating inference engine')
ie = IECore()
net = IENetwork(model=model_xml, weights=model_bin)

if args['extension'] and 'CPU' in args['device']:
    ie.add_extension(args['extension'], 'CPU')

log.info('...Checking that the network can be run on the selected device')
supported_layers = ie.query_network(net, args['device'])
not_supported_layers = [
    l for l in net.layers.keys() if l not in supported_layers
]

if len(not_supported_layers) != 0:
    log.error(
        '...The following layers are not supported by the device.\n {}'.format(
            ', '.join(not_supported_layers)))
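
The snippet ends right after the check; the usual continuation (a sketch, not part of the original, assuming the customary sys import) aborts on unsupported layers and otherwise loads the network:

# Abort if the device cannot run the network, then load it for inference
if not_supported_layers:
    sys.exit(1)

exec_net = ie.load_network(network=net, device_name=args['device'])
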
Example #11
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    with open(args.testparam, 'r') as stream:
        test_param = yaml.load(stream, Loader=yaml.FullLoader)
    sample_rate = test_param['sample_rate']
    rolling_step = test_param['rolling_step']  # this is rolling step

    test_folder = test_param['test_path'] + "/audio"
    yaml_file = test_param['test_path'] + \
                "/meta/mixture_recipes_devtest_gunshot.yaml"

    print("Testing folder to proceed:")
    print(test_folder)
    print(model_bin)
    log.info("Creating Inference Engine...")
    ie = IECore()

    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")

    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(args.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    info_input_blob = None
    input_shape_dims = None
    feed_dict = {}
    for blob_name in net.inputs:
        print(len(net.inputs[blob_name].shape))
        if len(net.inputs[blob_name].shape) == 3:
            input_blob = blob_name
            input_shape_dims = 3
        elif len(net.inputs[blob_name].shape) == 2:
            info_input_blob = blob_name
            input_shape_dims = 2

        else:
            raise RuntimeError(
                "Unsupported {}D input layer '{}'. Only 2D and 4D input layers are supported"
                .format(len(net.inputs[blob_name].shape), blob_name))
    print(len(net.outputs))

    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = ie.load_network(network=net,
                               num_requests=1,
                               device_name=args.device)

    # Read and pre-process input image
    if input_shape_dims == 3:
        batch_size, window_width, num_class = net.inputs[input_blob].shape
    elif input_shape_dims == 2:
        batch_size, window_width = net.inputs[input_blob].shape
    else:
        print("check input shape dimension: ")
        return
    print(net.inputs[input_blob].shape)
    files = aux_fn.get_all_files(test_folder)
    sig_process = {}
    file_meta = None
    with open(yaml_file, 'r') as infile:
        metadata = yaml.load(infile, Loader=yaml.SafeLoader)
    for file in tqdm(files):
        file_time = time()
        print(file)
        logging.info(f' inference on {file}: ')

        filename_wav = os.path.basename(file)
        for i in range(len(metadata)):
            if filename_wav in metadata[i]['mixture_audio_filename']:
                file_meta = metadata[i]
                break

        data = load_data(window_width, file, sample_rate, rolling_step)
        if input_shape_dims == 3:
            data = np.expand_dims(data, axis=2)

        iteration = data.shape[0]

        pred = []

        for iterate in range(iteration):
            print(input_blob)
            if input_shape_dims == 3:
                feed_dict[input_blob] = data[iterate, :, :]
            else:
                feed_dict[input_blob] = data[iterate, :]
            # inf_start = time.time()
            exec_net.start_async(request_id=0, inputs=feed_dict)
            if exec_net.requests[0].wait(-1) == 0:
                # inf_end = time.time()
                # det_time = inf_end - inf_start
                res = exec_net.requests[0].outputs[out_blob]
                print("result shape", res.shape)
                pred.append(list(res))

        sig_process[filename_wav] = get_response(data, pred, file_meta,
                                                 sample_rate, rolling_step)
        tmp_time = time() - file_time
        logging.info(
            f" \tfile inference time = {tmp_time:.2f} s, {(tmp_time / 117) * 1000:.4f} ms per sample"
        )
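
load_data() is not shown; judging from how it is used, it frames the audio into fixed-width windows with a hop of rolling_step. A sketch of just that framing step over an already-loaded 1-D signal (the name and details are ours, assuming numpy as np):

def frame_signal(signal, window_width, rolling_step):
    # Stack overlapping windows: one row per inference
    starts = range(0, max(len(signal) - window_width, 0) + 1, rolling_step)
    return np.stack([signal[s:s + window_width] for s in starts])
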
Example #12
def read_segmentation_demo():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format("CPU"))
    plugin = IEPlugin(device="CPU", plugin_dirs=plugin_dir)
    plugin.add_cpu_extension(cpu_extension)
    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in demo's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    del net
    cap = cv2.VideoCapture(
        "/home/kang/Downloads/openvino_sample_show-master/material/read_segmentation_demo.mp4"
    )

    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the demo execution press Esc button")
    is_async_mode = True
    render_time = 0
    ret, frame = cap.read()

    print(
        "To close the application, press 'CTRL+C' or any key with focus on the output window"
    )
    while cap.isOpened():
        if is_async_mode:
            ret, next_frame = cap.read()
        else:
            ret, frame = cap.read()
        if not ret:
            break
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        # Run in sync or async execution mode
        inf_start = time.time()
        if is_async_mode:
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=next_request_id,
                                 inputs={input_blob: in_frame})
        else:
            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=cur_request_id,
                                 inputs={input_blob: in_frame})
        if exec_net.requests[cur_request_id].wait(-1) == 0:

            # Get the network output
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            # Parse the road-segmentation result
            res = np.squeeze(res, 0)
            res = res.transpose(1, 2, 0)  # HWC layout
            res = np.argmax(res, 2)
            hh, ww = res.shape
            mask = np.zeros((hh, ww, 3), dtype=np.uint8)
            mask[np.where(res > 0)] = (0, 255, 255)
            mask[np.where(res > 1)] = (255, 0, 255)

            # Show the mask
            cv2.imshow("mask", mask)
            # Resize the mask to the frame size before blending; cv2.resize
            # takes (width, height), i.e. (shape[1], shape[0])
            mask = cv2.resize(mask, dsize=(frame.shape[1], frame.shape[0]))
            # To deepen the overlay color, raise 0.2 and lower 0.8 (the
            # weights must sum to 1)
            frame = cv2.addWeighted(mask, 0.2, frame, 0.8, 0)

            inf_end = time.time()
            det_time = inf_end - inf_start

            # Draw performance stats
            inf_time_message = "Inference time: {:.3f} ms, FPS:{:.3f}".format(
                det_time * 1000, 1000 / (det_time * 1000 + 1))
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                render_time * 1000)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                "Async mode is off. Processing request {}".format(cur_request_id)

            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message, (10, int(initial_h - 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

        # Render the result frame and measure rendering time
        render_start = time.time()
        cv2.imshow("Segmentation Results", frame)
        render_end = time.time()
        render_time = render_end - render_start

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame

        key = cv2.waitKey(1)
        if key == 27:
            break
    cv2.destroyAllWindows()

    del exec_net
    del plugin
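The segmentation demo above overlaps inference and rendering by juggling two infer requests. A minimal sketch of that double-buffering pattern, factored into a reusable helper; `exec_net` is assumed to have been created with `num_requests=2`, and `preprocess` stands for a caller-supplied resize/transpose function (both names are illustrative):

def run_async_pipeline(exec_net, input_blob, frames, preprocess):
    # Double-buffered async inference: submit frame k+1 while frame k runs.
    cur_id, next_id = 0, 1
    results = []
    submitted = False
    for frame in frames:
        exec_net.start_async(request_id=next_id,
                             inputs={input_blob: preprocess(frame)})
        if submitted and exec_net.requests[cur_id].wait(-1) == 0:
            # Copy: a request's output buffer is reused when it is resubmitted.
            results.append({k: v.copy() for k, v in
                            exec_net.requests[cur_id].outputs.items()})
        cur_id, next_id = next_id, cur_id
        submitted = True
    # Drain the last in-flight request.
    if submitted and exec_net.requests[cur_id].wait(-1) == 0:
        results.append({k: v.copy() for k, v in
                        exec_net.requests[cur_id].outputs.items()})
    return results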
Example No. 13
def main_IE_infer():

    # Define Constants.

    t1 = 0
    fps = ""
    #framepos = 0
    #frame_count = 0
    #vidfps = 0
    #skip_frame = 0
    #elapsedTime = 0

    detected_people_frames = [0, 0, 0, 0, 0]
    detected_hat_frames = [0, 0, 0, 0, 0]
    detected_vest_frames = [0, 0, 0, 0, 0]

    print("loading the model...")
    #    args = build_argparser().parse_args()
    model_xml = "tiny_yolo_IR_500000_FP32.xml"  #<--- MYRIAD
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    time.sleep(1)
    print("loading plugin on Intel NCS2...")

    plugin = IEPlugin(device="CPU")
    net = IENetwork(model=model_xml, weights=model_bin)
    input_blob = next(iter(net.inputs))
    exec_net = plugin.load(network=net)

    # Define a window to show the cam stream on it
    window_title = "PPE Detector"
    cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)

    while cap.isOpened():
        current_time = time.ctime()

        # initialize lists to store objects in frame
        People = []
        Hats = []
        Vests = []
        objects = []

        det_person = 0
        det_hat = 0
        det_vest = 0
        # Shift register over the last 5 frames of detections.
        detected_people_frames[1:] = detected_people_frames[:-1]
        detected_hat_frames[1:] = detected_hat_frames[:-1]
        detected_vest_frames[1:] = detected_vest_frames[:-1]

        # Load the image from the camera.
        ret, image = cap.read()
        if not ret:
            break
        #resized_image = cv2.resize(image, (new_w, new_h), interpolation = cv2.INTER_CUBIC) #resize image to 416x416
        resized_image = cv2.resize(image,
                                   (new_w, new_h))  #resize image to 416x416
        resized_image = cv2.cvtColor(resized_image, cv2.COLOR_RGB2BGR)
        canvas = np.full((m_input_size, m_input_size, 3), 0, dtype=np.uint8)  # uint8 letterbox canvas
        canvas[(m_input_size - new_h) // 2:(m_input_size - new_h) // 2 + new_h,
               (m_input_size - new_w) // 2:(m_input_size - new_w) // 2 +
               new_w, :] = resized_image
        prepimg = canvas
        prepimg = prepimg[np.newaxis, :, :, :]  # Batch size axis add
        prepimg = prepimg.transpose((0, 3, 1, 2))  # NHWC to NCHW
        outputs = exec_net.infer(inputs={input_blob: prepimg})

        for output in outputs.values():
            objects = ParseYOLOV3Output(output, new_h, new_w, camera_height,
                                        camera_width, confidence_threshold_hat,
                                        objects)

        # Filtering overlapping boxes same class

        # Separate classes detected.
        objlen = len(objects)

        for i in range(objlen):
            if (objects[i].class_id == 0):
                People.append(objects[i])

            if (objects[i].class_id == 1):
                Hats.append(objects[i])
            if (objects[i].class_id == 2):
                Vests.append(objects[i])

        # Eliminate overlapping hats.
        objlen = len(Hats)
        for i in range(objlen):
            if (Hats[i].confidence == 0.0):
                continue
            for j in range(i + 1, objlen):
                if (IntersectionOverUnion(Hats[i], Hats[j]) >= IOU_threshold):
                    if Hats[i].confidence < Hats[j].confidence:
                        Hats[i], Hats[j] = Hats[j], Hats[i]
                    Hats[j].confidence = 0.0

        # Drawing hats boxes.
        for obj in Hats:
            if obj.confidence < confidence_threshold_hat:
                continue
            label = obj.class_id
            confidence = obj.confidence
            #if confidence >= 0.2:
            label_text = LABELS[label] + " (" + "{:.1f}".format(
                confidence * 100) + "%)"
            #label_text = LABELS[label]
            cv2.rectangle(image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax),
                          color2, box_thickness)
            cv2.putText(image, label_text, (obj.xmin, obj.ymin - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, label_text_color, 1)
            det_hat = det_hat + 1
        detected_hat_frames[0] = det_hat

        # Eliminate overlapping vests.
        objlen = len(Vests)
        for i in range(objlen):
            if (Vests[i].confidence == 0.0):
                continue
            for j in range(i + 1, objlen):
                if (IntersectionOverUnion(Vests[i], Vests[j]) >=
                        IOU_threshold):
                    if Vests[i].confidence < Vests[j].confidence:
                        Vests[i], Vests[j] = Vests[j], Vests[i]
                    Vests[j].confidence = 0.0

        # Drawing vests boxes
        for obj in Vests:
            #print(str(confidence))
            if obj.confidence < confidence_threshold_vest:
                continue
            label = obj.class_id
            confidence = obj.confidence
            #print(str(confidence))
            label_text = LABELS[label] + " (" + "{:.1f}".format(
                confidence * 100) + "%)"
            #label_text = LABELS[label]
            cv2.rectangle(image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax),
                          color3, box_thickness)
            cv2.putText(image, label_text, (obj.xmin, obj.ymin - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, label_text_color, 1)
            det_vest = det_vest + 1
        detected_vest_frames[0] = det_vest

        # Eliminate overlapping people.
        objlen = len(People)
        for i in range(objlen):
            if (People[i].confidence == 0.0):
                continue
            for j in range(i + 1, objlen):
                if (IntersectionOverUnion(People[i], People[j]) >=
                        IOU_threshold):
                    if People[i].confidence < People[j].confidence:
                        People[i], People[j] = People[j], People[i]
                    People[j].confidence = 0.0

        # Drawing people's boxes
        for obj in People:
            if obj.confidence < confidence_threshold_person:
                continue
            label = obj.class_id
            confidence = obj.confidence
            #if confidence >= 0.2:
            label_text = LABELS[label] + " (" + "{:.1f}".format(
                confidence * 100) + "%)"
            #label_text = LABELS[label]
            cv2.rectangle(image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax),
                          color1, box_thickness)
            cv2.putText(image, label_text, (obj.xmin, obj.ymin - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, label_text_color, 1)
            det_person = det_person + 1
        detected_people_frames[0] = det_person

        conclusion_color = [0, 0, 255]

        if max(detected_people_frames) != 0:
            if max(detected_hat_frames) != 0:
                if max(detected_vest_frames) != 0:
                    conclusion = "PPE: Detected!"
                    conclusion_color = [0, 0, 255]
                    print(current_time[11:19] + ' - ' + '\x1b[6;30;42m' +
                          '[✔] %s [✔]' % (conclusion) + '\x1b[0m')

                    #sh.set_pixels(Person_Vest_Hat)
                    conclusion_color = [0, 255, 0]
                else:
                    conclusion = "PPE: Not Pass, Missing Vest"
                    print(current_time[11:19] + ' - ' + '\x1b[1;29;41m' +
                          '[✘]' + conclusion + '[✘]' + '\x1b[0m')
                    #sh.set_pixels(Person_hat)
            else:
                if max(detected_vest_frames) != 0:
                    conclusion = "PPE: Not Pass, Missing Hat"
                    print(current_time[11:19] + ' - ' + '\x1b[1;29;41m' +
                          '[✘]' + conclusion + '[✘]' + '\x1b[0m')
                    #sh.set_pixels(Person_Vest)
                else:
                    conclusion = "PPE: Not Pass, Missing Vest and hat"
                    print(current_time[11:19] + ' - ' + '\x1b[1;29;41m' +
                          '[✘]' + conclusion + '[✘]' + '\x1b[0m')
                    #sh.set_pixels(Person)
        else:
            conclusion = "No Person Detected"
            #sh.set_pixels(question_mark) #display a question mark
            print(current_time[11:19] + ' - ' + conclusion)

        # Write Performance information.
        elapsedTime = time.time() - t1
        fps = "{:.1f} FPS".format(1 / elapsedTime)

        #   print((fps+ " - " + conclusion))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # Restart the time.
        #t1 = time.time()

        cv2.putText(image, (fps), (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    conclusion_color, 1, cv2.LINE_AA)

        #Display Image
        cv2.imshow(window_title, image)
        # Write the output frame (written three times, presumably to pad the output video's frame rate)
        for f in range(3):
            out.write(image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # Restart the time.
        t1 = time.time()

        #temp = sh.get_temperature()
        #print("Temp: %s ºC" % str(round(temp,1)))               # Show temp on console
        #rawCapture.truncate(0)
    out.release()
    cv2.destroyAllWindows()

    #sh.clear(0,0,0)
    del net
    del exec_net
    del plugin
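The three suppression loops above for Hats, Vests and People are identical apart from the list they operate on; they could be factored into one helper. A sketch, assuming the demo's `IntersectionOverUnion` function and objects with a mutable `confidence` attribute, exactly as used above:

def suppress_overlaps(objs, iou_threshold):
    # Zero out the confidence of every box that overlaps a stronger box.
    for i in range(len(objs)):
        if objs[i].confidence == 0.0:
            continue
        for j in range(i + 1, len(objs)):
            if IntersectionOverUnion(objs[i], objs[j]) >= iou_threshold:
                # Keep the higher-confidence box in slot i, suppress the other.
                if objs[i].confidence < objs[j].confidence:
                    objs[i], objs[j] = objs[j], objs[i]
                objs[j].confidence = 0.0
    return objs

# Usage, replacing the three copies inside the loop body:
# Hats = suppress_overlaps(Hats, IOU_threshold)
# Vests = suppress_overlaps(Vests, IOU_threshold)
# People = suppress_overlaps(People, IOU_threshold)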
Example No. 14
def make_network(model, weights):
    return IENetwork(model=model, weights=weights)
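In later OpenVINO releases, constructing `IENetwork` directly was deprecated in favor of `IECore.read_network`; a version-dependent sketch of the equivalent helper:

from openvino.inference_engine import IECore

def make_network_iecore(model, weights):
    # read_network returns the same kind of network object, ready for load_network.
    ie = IECore()
    return ie.read_network(model=model, weights=weights)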
Example No. 15
def main():
    parser = argparse.ArgumentParser(
        description='Component executing inference operation')
    parser.add_argument('--model_bin',
                        type=str,
                        required=True,
                        help='GCS or local path to model weights file (.bin)')
    parser.add_argument('--model_xml',
                        type=str,
                        required=True,
                        help='GCS or local path to model graph (.xml)')
    parser.add_argument('--input_numpy_file',
                        type=str,
                        required=True,
                        help='GCS or local path to input dataset numpy file')
    parser.add_argument('--label_numpy_file',
                        type=str,
                        required=True,
                        help='GCS or local path to numpy file with labels')
    parser.add_argument('--output_folder',
                        type=str,
                        required=True,
                        help='GCS or local path to results upload folder')
    parser.add_argument('--batch_size',
                        type=int,
                        default=1,
                        help='batch size to be used for inference')
    parser.add_argument('--scale_div',
                        type=float,
                        default=1,
                        help='scale the numpy input by dividing by this value')
    parser.add_argument('--scale_sub',
                        type=float,
                        default=128,
                        help='scale the numpy input by subtracting this value')
    args = parser.parse_args()
    print(args)

    device = "CPU"
    plugin_dir = None

    model_xml = get_local_file(args.model_xml)
    print("model xml", model_xml)
    if model_xml == "":
        exit(1)
    model_bin = get_local_file(args.model_bin)
    print("model bin", model_bin)
    if model_bin == "":
        exit(1)
    input_numpy_file = get_local_file(args.input_numpy_file)
    print("input_numpy_file", input_numpy_file)
    if input_numpy_file == "":
        exit(1)

    label_numpy_file = get_local_file(args.label_numpy_file)
    print("label_numpy_file", label_numpy_file)
    if label_numpy_file == "":
        exit(1)

    cpu_extension = "/usr/local/lib/libcpu_extension.so"

    plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
    if cpu_extension and 'CPU' in device:
        plugin.add_cpu_extension(cpu_extension)

    print("inference engine:", model_xml, model_bin, device)

    # Read IR
    print("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)
    batch_size = args.batch_size
    net.batch_size = batch_size
    print("Model loaded. Batch size", batch_size)

    input_blob = next(iter(net.inputs))
    output_blob = next(iter(net.outputs))
    print(output_blob)

    print("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=1)

    print("Loading input numpy")
    imgs = np.load(input_numpy_file, mmap_mode='r', allow_pickle=False)
    imgs = (imgs / args.scale_div) - args.scale_sub
    lbs = np.load(label_numpy_file, mmap_mode='r', allow_pickle=False)

    print("Loaded input data", imgs.shape, imgs.dtype, "Min value:",
          np.min(imgs), "Max value", np.max(imgs))

    combined_results = {}  # dictionary storing results for all model outputs
    processing_times = np.zeros((0), int)
    matched_count = 0
    total_executed = 0

    for x in range(0, imgs.shape[0] - batch_size + 1, batch_size):
        img = imgs[x:(x + batch_size)]
        lb = lbs[x:(x + batch_size)]
        start_time = datetime.datetime.now()
        results = exec_net.infer(inputs={input_blob: img})
        end_time = datetime.datetime.now()
        duration = (end_time - start_time).total_seconds() * 1000
        print("Inference duration:", duration, "ms")
        processing_times = np.append(processing_times,
                                     np.array([int(duration)]))
        output = list(results.keys())[0]  # check only one output
        nu = results[output]
        for i in range(nu.shape[0]):
            single_result = nu[[i], ...]
            ma = np.argmax(single_result)
            total_executed += 1
            if ma == lb[i]:
                matched_count += 1
                mark_message = "; Correct match."
            else:
                mark_message = "; Incorrect match. Should be {} {}".format(
                    lb[i], classes.imagenet_classes[lb[i]])
            print("\t", i, classes.imagenet_classes[ma], ma, mark_message)
        if output in combined_results:
            combined_results[output] = np.append(combined_results[output],
                                                 results[output], 0)
        else:
            combined_results[output] = results[output]

    filename = output.replace("/", "_") + ".npy"
    np.save(filename, combined_results[output])
    upload_file(filename, args.output_folder)
    print("Inference results uploaded to", filename)
    print('Classification accuracy: {:.2f}'.format(100 * matched_count /
                                                   total_executed))
    print('Average time: {:.2f} ms; average speed: {:.2f} fps'.format(
        round(np.average(processing_times), 2),
        round(1000 * batch_size / np.average(processing_times), 2)))

    accuracy = matched_count / total_executed
    latency = np.average(processing_times)
    metrics = {
        'metrics': [{
            'name': 'accuracy-score',
            'numberValue': accuracy,
            'format': "PERCENTAGE"
        }, {
            'name': 'latency',
            'numberValue': latency,
            'format': "RAW"
        }]
    }

    with open('/mlpipeline-metrics.json', 'w') as f:
        json.dump(metrics, f)
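Note that the batching loop above iterates `range(0, imgs.shape[0] - batch_size + 1, batch_size)`, so up to `batch_size - 1` trailing samples are silently skipped. A hedged sketch of a generator that pads the final short batch instead (the padded rows would then need to be excluded from the accuracy count via the returned `valid`):

import numpy as np

def iter_batches(imgs, lbs, batch_size):
    for x in range(0, imgs.shape[0], batch_size):
        img, lb = imgs[x:x + batch_size], lbs[x:x + batch_size]
        valid = img.shape[0]  # number of real samples in this batch
        if valid < batch_size:
            # Pad the short tail batch by repeating the last sample.
            pad = batch_size - valid
            img = np.concatenate([img, np.repeat(img[-1:], pad, axis=0)])
        yield img, lb, valid

# Usage: for img, lb, valid in iter_batches(imgs, lbs, batch_size): ...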
# load the configuration file
conf = Conf(args["conf"])

# load the COCO class labels our YOLO model was trained on and
# initialize a list of colors to represent each possible class
# label
LABELS = open(conf["labels_path"]).read().strip().split("\n")
np.random.seed(42)
COLORS = np.random.uniform(0, 255, size=(len(LABELS), 3))

# initialize the plugin in for specified device
plugin = IEPlugin(device="MYRIAD")

# read the IR generated by the Model Optimizer (.xml and .bin files)
print("[INFO] loading models...")
net = IENetwork(model=conf["xml_path"], weights=conf["bin_path"])

# prepare inputs
print("[INFO] preparing inputs...")
inputBlob = next(iter(net.inputs))

# set the default batch size as 1 and get the number of input blobs,
# number of channels, the height, and width of the input blob
net.batch_size = 1
(n, c, h, w) = net.inputs[inputBlob].shape

# if a video path was not supplied, grab a reference to the webcam
if args["input"] is None:
    print("[INFO] starting video stream...")
    # vs = VideoStream(src=0).start()
    vs = VideoStream(usePiCamera=True).start()
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape

    log.info("Net input shape: " + str((n, c, h, w)))
    log.info("Net output shape: " + str(net.outputs[out_blob].shape))

    del net
    if args.input == 'cam':
        input_stream = 0
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cap = cv2.VideoCapture(input_stream)

    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the demo execution press Esc button")
    is_async_mode = True
    render_time = 0
    ret, frame = cap.read()

    print("To close the application, press 'q'")
    while cap.isOpened():
        if is_async_mode:
            ret, next_frame = cap.read()
        else:
            ret, frame = cap.read()
        if not ret:
            break

        initial_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        initial_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

        # Main sync point:
        # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
        # in the regular mode we start the CURRENT request and immediately wait for its completion
        inf_start = time.time()
        if is_async_mode:
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=next_request_id, inputs={input_blob: in_frame})
        else:
            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
        if exec_net.requests[cur_request_id].wait(-1) == 0:
            inf_end = time.time()
            det_time = inf_end - inf_start

            # Parse detection results of the current request and draw every object above the threshold
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            for obj in res[0][0]:
                # Draw only objects when probability more than specified threshold
                if obj[2] > args.prob_threshold:
                    best_proposal = obj

                    xmin = int(best_proposal[3] * initial_w)
                    ymin = int(best_proposal[4] * initial_h)
                    xmax = int(best_proposal[5] * initial_w)
                    ymax = int(best_proposal[6] * initial_h)
                    class_id = int(best_proposal[1])

                    # Draw box and label\class_id
                    color = (min(class_id * 12.5, 255), min(class_id * 7, 255), min(class_id * 5, 255))
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)

                    det_label = labels_map[class_id - 1] if labels_map else str(class_id)
                    label_and_prob = det_label + ", " + "{:.1f}".format(obj[2] * 100) + "%"
                    cv2.putText(frame, label_and_prob, (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)

            # Draw performance stats
            inf_time_message = "Inference time: N/A for async mode" if is_async_mode else \
                "Inference time: {:.3f} ms".format(det_time * 1000)
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(render_time * 1000)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                "Async mode is off. Processing request {}".format(cur_request_id)

            cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message, (10, int(initial_h - 20)), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                        (10, 10, 200), 1)

        render_start = time.time()
        cv2.imshow("Detection Results", frame)
        render_end = time.time()
        render_time = render_end - render_start

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame

        key = cv2.waitKey(1)
        if (key & 0xFF) == ord('q'): # NOTICE: Changed exit key to 'q', not 'CTRL + C'
            break
        if (9 == key):
            is_async_mode = not is_async_mode
            log.info("Switched to {} mode".format("async" if is_async_mode else "sync"))

    cv2.destroyAllWindows()
import cv2
import numpy as np
from openvino.inference_engine import IENetwork, IECore  # importable once the OpenVINO environment is set up

model_xml = "./model/face-detection-adas-0001.xml"  # face detection model (IR .xml)
model_bin = "./model/face-detection-adas-0001.bin"  # weights matching the .xml
model_age_xml = "./model/age-gender-recognition-retail-0013.xml"  # age/gender model (IR .xml)
model_age_bin = "./model/age-gender-recognition-retail-0013.bin"  # weights matching the .xml


# age/gender variant
ie_age = IECore()  # create the Inference Engine core
net_age = IENetwork(model=model_age_xml, weights=model_age_bin)  # read and define the model
input_blob_age = next(iter(net_age.inputs))  # get the model's input blob name
out_blob_age = next(iter(net_age.outputs))  # get the output blob name
net_age.batch_size = 1  # batch size
net_age.inputs[input_blob_age].precision = "U8"  # treat the input as uint8 (image array)
n, c, h_age, w_age = net_age.inputs[input_blob_age].shape  # batch n, channels c, height and width required by the model
#image = np.ndarray(shape=(c, h, w))  # prepare the array

image = cv2.imread('me.png')  # load the image
if image.shape[:-1] != (h_age, w_age):  # resize if the dimensions differ (shape[:-1] is (height, width))
    image = cv2.resize(image, (w_age, h_age))
image = image.transpose((2, 0, 1))  # convert the HWC input image to CHW

exec_net_age = ie_age.load_network(network=net_age, device_name='CPU')  # load the network onto the device
res_age = exec_net_age.infer(inputs={input_blob_age: image})  # run inference
res_age = res_age[out_blob_age]  # extract the result
prob = exec_net_age.requests[0].outputs['prob']
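For reference, age-gender-recognition-retail-0013 is documented in the Open Model Zoo as producing two outputs: 'age_conv3' (predicted age divided by 100) and 'prob' (softmax over [female, male]). A hedged sketch of interpreting both, assuming those output names match your model version:

outs = exec_net_age.requests[0].outputs
age = outs['age_conv3'][0, 0, 0, 0] * 100          # age is encoded as age / 100
female_prob, male_prob = outs['prob'][0, :, 0, 0]  # softmax over [female, male]
print("age: {:.0f}, gender: {}".format(
    age, "male" if male_prob > female_prob else "female"))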
Example No. 19
    def load_model(self,
                   model,
                   device,
                   input_size,
                   output_size,
                   num_requests,
                   cpu_extension=None):
        """
         Loads a network and an image to the Inference Engine plugin.
        :param model: .xml file of pre trained model
        :param cpu_extension: extension for the CPU device
        :param device: Target device
        :param input_size: Number of input layers
        :param output_size: Number of output layers
        :param num_requests: Index of Infer request value. Limited to device capabilities.
        :return:  Shape of input layer
        """

        model_xml = model
        model_bin = os.path.splitext(model_xml)[0] + ".bin"
        # Plugin initialization for specified device
        # and load extensions library if specified
        log.info("Initializing plugin for {} device...".format(device))
        #self.plugin = IEPlugin(device=device)
        ie = IECore()
        #if cpu_extension and 'CPU' in device:
        if cpu_extension and 'CPU' == device:
            #self.plugin.add_cpu_extension(cpu_extension)
            ie.add_extension(cpu_extension, device)

        # Read IR
        log.info("Reading IR...")
        self.net = IENetwork(model=model_xml, weights=model_bin)
        log.info("Loading IR to the plugin...")

        if device == "CPU":
            supported_layers = ie.query_network(self.net, device)
            not_supported_layers = \
                [l for l in self.net.layers.keys() if l not in supported_layers]
            if len(not_supported_layers) != 0:
                log.error("Following layers are not supported by "
                          "the plugin for specified device {}:\n {}".format(
                              device, ', '.join(not_supported_layers)))
                log.error("Please try to specify cpu extensions library path"
                          " in command line parameters using -l "
                          "or --cpu_extension command line argument")
                sys.exit(1)

        if num_requests == 0:
            # Loads network read from IR to the plugin
            # self.net_plugin = self.plugin.load(network=self.net)
            self.net_plugin = ie.load_network(network=self.net,
                                              device_name=device)
        else:
            #self.net_plugin = self.plugin.load(network=self.net, num_requests=2,device_name=TARGET_DEVICE )
            self.net_plugin = ie.load_network(network=self.net,
                                              num_requests=2,
                                              device_name=device)

        self.input_blob = next(iter(self.net.inputs))
        self.out_blob = next(iter(self.net.outputs))
        assert len(self.net.inputs.keys()) == input_size,\
            "Supports only {} input topologies".format(len(self.net.inputs))
        assert len(self.net.outputs) == output_size, \
            "Supports only {} output topologies".format(len(self.net.outputs))

        return self.get_input_shape()
Example No. 20
def predict(img):
    model_xml = "C:\modeloptimizing\HTRModel.xml"
    model_bin = "C:\modeloptimizing\HTRModel.bin"
    ie = IECore()
    net = IENetwork(model=model_xml, weights=model_bin)
    input_blob = next(iter(net.inputs))
    n, c, h, w = net.inputs[input_blob].shape
    exec_net = ie.load_network(network=net, device_name="CPU")
    input_size = (1024, 128, 1)
    img = preprocess(img, input_size=input_size)
    img = normalization([img])
    img = np.squeeze(img, axis=3)
    img = np.expand_dims(img, axis=0)
    start = timer()
    print("Starting inference...")
    res = exec_net.infer(inputs={input_blob: img})
    end = timer()
    print("End inference time: ", 1000 * (end - start))
    output_data = res['dense/BiasAdd/Softmax']
    print(output_data)

    steps_done = 0
    steps = 1
    batch_size = int(np.ceil(len(output_data) / steps))
    input_length = len(max(output_data, key=len))
    predicts, probabilities = [], []

    while steps_done < steps:
        index = steps_done * batch_size
        until = index + batch_size

        x_test = np.asarray(output_data[index:until])
        x_test_len = np.asarray([input_length for _ in range(len(x_test))])

        decode, log_probs = K.ctc_decode(x_test,
                                         x_test_len,
                                         greedy=False,
                                         beam_width=10,
                                         top_paths=3)
        probabilities.extend([np.exp(x) for x in log_probs])
        decode = [[[int(p) for p in x if p != -1] for x in y] for y in decode]
        predicts.extend(np.swapaxes(decode, 0, 1))

        steps_done += 1

    for p in predicts:
        print(str(p))

    for pb in probabilities:
        print(str(pb))

    #interpretation of the data

    max_text_length = 128
    charset_base = string.printable[:95]
    tokenizer = Tokenizer(chars=charset_base, max_text_length=max_text_length)

    predicts = [[tokenizer.decode(x) for x in y] for y in predicts]

    print("\n####################################")
    for pred, prob in zip(predicts, probabilities):
        print("\nProb.  - Predict")
        for (pd, pb) in zip(pred, prob):
            print(f"{pb:.4f} - {pd}")
    # Return the best (first) path of the first prediction
    pdfinal = predicts[0][0]
    pbperc = probabilities[0][0] * 100
    print("\n####################################")
    return pdfinal, pbperc
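As an alternative to `K.ctc_decode`, best-path (greedy) CTC decoding can be done with plain NumPy: take the argmax per timestep, collapse repeats, and drop the blank token. A sketch, assuming the blank is the last class index as in Keras' CTC:

import numpy as np

def ctc_greedy_decode(softmax_out):  # shape: (timesteps, classes)
    blank = softmax_out.shape[-1] - 1
    best = np.argmax(softmax_out, axis=-1)
    decoded, prev = [], blank
    for t in best:
        if t != prev and t != blank:  # collapse repeats, drop blanks
            decoded.append(int(t))
        prev = t
    return decoded

# Usage: ctc_greedy_decode(output_data[0]) yields token ids for tokenizer.decode.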
Example No. 21
    def load_model(self, model, batch_size, concurrency, device, cpu_ext=None):

        # Initialize the Inference Engine
        self.plugin = IECore()
        #self.plugin.set_config({'CPU_THREADS_NUM': '8'}, "CPU")
        #self.plugin.set_config({'DYN_BATCH_ENABLED': 'YES'}, "CPU")
        #print(self.plugin.available_devices)

        ### Add any necessary extensions ###
        if cpu_ext and device.lower() == 'cpu':
            self.plugin.add_extension(cpu_ext, device)

        # Initialize IENetwork object from IR files
        model_xml = model
        model_bin = os.path.splitext(model_xml)[0] + '.bin'
        self.network = IENetwork(model=model_xml, weights=model_bin)

        # Support topologies with 1 and 2 inputs

        self.image_tensor_blob = None
        self.image_info_blob = None

        for input_key, input_val in self.network.inputs.items():
            if len(input_val.shape) == 4:  # image tensor
                self.image_tensor_blob = input_key
            elif len(input_val.shape) == 2:  # image info
                self.image_info_blob = input_key

        assert self.image_tensor_blob is not None, \
            "Failed to find the input image specification"

        self.output_blob = next(iter(self.network.outputs))

        # Works for SSD models, but for Faster RCNN it fails:
        # RuntimeError: Failed to infer shapes for Reshape layer
        # (Reshape_Transpose_Class) with error: Invalid reshape mask
        # (dim attribute): number of elements in input: [7,2,12,1444]
        # and output: [1,24,38,38] mismatch
        ## This will reshape the network, so it can take
        ## several frames in a batch and also the output
        ## tensor will be (1,1,N*100,7) instead of (N,1,100,7).
        #input_shape = self.network.inputs[self.image_tensor_blob].shape
        #input_shape[0] = batch_size
        #self.network.reshape({self.image_tensor_blob: input_shape})

        # Set the network batch size
        self.network.batch_size = batch_size

        ### Check for unsupported layers ###

        supported_layers = self.plugin.query_network(network=self.network,
                                                     device_name=device)
        supported_layers = set(supported_layers.keys())
        net_layers = set(self.network.layers.keys())
        unsupported_layers = net_layers.difference(supported_layers)

        if unsupported_layers:
            raise Exception('Unsupported layers: ' +
                            ', '.join(unsupported_layers))

        ### Load the model ###
        self.exec_network = self.plugin.load_network(network=self.network,
                                                     device_name=device,
                                                     num_requests=concurrency)

        # Initialize member variables
        self.request_id = 0
        self.request_count = 0
        self.concurrency = concurrency
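A hedged sketch of how the bookkeeping initialized above (`request_id`, `request_count`, `concurrency`) might drive a round-robin async pipeline; `loader` stands for an instance of this class and `batch` for a preprocessed input tensor:

def submit(loader, batch):
    # Kick off inference on the next request slot, round-robin.
    # The caller must collect before more than `concurrency` submits are outstanding.
    loader.exec_network.start_async(
        request_id=loader.request_id,
        inputs={loader.image_tensor_blob: batch})
    loader.request_id = (loader.request_id + 1) % loader.concurrency
    loader.request_count += 1

def collect(loader):
    # Block on the oldest in-flight request and return its output tensor.
    oldest = (loader.request_id - loader.request_count) % loader.concurrency
    loader.exec_network.requests[oldest].wait(-1)
    loader.request_count -= 1
    return loader.exec_network.requests[oldest].outputs[loader.output_blob]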
Example No. 22
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    log.info("Creating Inference Engine")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(args.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(args.input)

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(
                args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    log.info("Batch size is {}".format(n))

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = ie.load_network(network=net, device_name=args.device)

    # Start sync inference
    log.info("Starting inference in synchronous mode")
    res = exec_net.infer(inputs={input_blob: images})

    # Processing output blob
    log.info("Processing output blob")
    res = res[out_blob]
    log.info("Top {} results: ".format(args.number_top))
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
    else:
        labels_map = None
    classid_str = "classid"
    probability_str = "probability"
    for i, probs in enumerate(res):
        probs = np.squeeze(probs)
        top_ind = np.argsort(probs)[-args.number_top:][::-1]
        print("Image {}\n".format(args.input[i]))
        print(classid_str, probability_str)
        print("{} {}".format('-' * len(classid_str),
                             '-' * len(probability_str)))
        for id in top_ind:
            det_label = labels_map[id] if labels_map else "{}".format(id)
            label_length = len(det_label)
            space_num_before = (len(classid_str) - label_length) // 2
            space_num_after = len(classid_str) - (space_num_before +
                                                  label_length) + 2
            space_num_before_prob = (len(probability_str) -
                                     len(str(probs[id]))) // 2
            print("{}{}{}{}{:.7f}".format(' ' * space_num_before, det_label,
                                          ' ' * space_num_after,
                                          ' ' * space_num_before_prob,
                                          probs[id]))
        print("\n")
    log.info(
        "This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n"
    )
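The manual space-counting in the results table above can be expressed more directly with Python's format alignment; a sketch that prints an equivalent table using the same `res`, `args` and `labels_map` variables:

for i, probs in enumerate(res):
    probs = np.squeeze(probs)
    top_ind = np.argsort(probs)[-args.number_top:][::-1]
    print("Image {}".format(args.input[i]))
    print("{:<10} {}".format("classid", "probability"))
    for id in top_ind:
        det_label = labels_map[id] if labels_map else str(id)
        # Left-align the label, print the probability with 7 decimals.
        print("{:<10} {:.7f}".format(det_label, probs[id]))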
Example No. 23
def main():
    args = build_argparser().parse_args()
    model_name = args.model
    device = args.device
    video_file = args.video
    max_people = args.max_people
    threshold = args.threshold
    output_path = args.output_path

    ## Load Model
    model_weights = model_name + '.bin'
    model_structure = model_name + '.xml'
    # Loading time
    start_model_load_time = time.time()
    # Extension
    CPU_EXTENSION = "/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"

    # Read the model
    core = IECore()
    #model = core.read_network(model=model_structure, weights=model_weights) # new openvino version
    model = IENetwork(model=model_structure,
                      weights=model_weights)  #old openvino version
    # Add CPU extension
    core.add_extension(CPU_EXTENSION, device)

    # Load the network into an executable network
    exec_network = core.load_network(network=model,
                                     device_name=device,
                                     num_requests=1)
    print("Model is loaded")

    # Time to load the model
    total_model_load_time = time.time() - start_model_load_time
    print("Time to load model: " + str(total_model_load_time))

    # Get the input layer
    input_name = next(iter(model.inputs))
    input_shape = model.inputs[input_name].shape
    output_name = next(iter(model.outputs))
    output_shape = model.outputs[output_name].shape

    print("input_name:" + str(input_name))
    print("input_shape:" + str(input_shape))
    print("output_name: " + str(output_name))
    print("output_shape: " + str(output_shape))

    # Get the input shape
    n, c, h, w = input_shape

    # Get the input video stream
    cap = cv2.VideoCapture(video_file)

    # Information about the input video stream
    initial_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    initial_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))

    print("initial_w: " + str(initial_w))
    print("initial_h: " + str(initial_h))
    print("video_len: " + str(video_len))
    print("fps: " + str(fps))

    # Define output video
    out_video = cv2.VideoWriter(os.path.join(output_path, 'output_video3.mp4'),
                                cv2.VideoWriter_fourcc(*'avc1'), fps,
                                (initial_w, initial_h), True)
    request_id = 0

    ### Read from the video capture ###
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        key_pressed = cv2.waitKey(60)

        # Pre-process the image as needed
        image = cv2.resize(frame, (w, h))
        image = image.transpose((2, 0, 1))
        image = image.reshape((n, c, h, w))
        print("n-c-h-w " + str(n) + "-" + str(c) + "-" + str(h) + "-" + str(w))

        # Start asynchronous inference for specified request
        start_inference_time = time.time()
        infer_request_handle = async_inference(exec_network, input_name, image)

        # Get the output data
        res = get_output(exec_network,
                         infer_request_handle,
                         output_name,
                         request_id=0,
                         output=None)
        detection_time = time.time() - start_inference_time
        print("Detection time: " + str(detection_time))

        # Draw Bounding Box
        frame = boundingbox(res, initial_w, initial_h, frame, threshold)

        # Write the output video
        out_video.write(frame)

    cap.release()
    cv2.destroyAllWindows()
Example No. 24
class ModelOpenVINO(object):
    def __init__(self,
                 xml_file_path,
                 bin_file_path=None,
                 mapping_file_path=None,
                 device='CPU',
                 required_inputs=None,
                 required_outputs=None,
                 max_num_requests=1,
                 collect_perf_counters=False,
                 cfg=None,
                 classes=None):

        from openvino.inference_engine import IENetwork

        ie = IECore()
        logging.info('Reading network from IR...')
        if bin_file_path is None:
            bin_file_path = osp.splitext(xml_file_path)[0] + '.bin'
        if mapping_file_path is None:
            mapping_file_path = osp.splitext(xml_file_path)[0] + '.mapping'

        self.net = IENetwork(model=xml_file_path, weights=bin_file_path)

        self.orig_ir_mapping = self.get_mapping(mapping_file_path)
        self.ir_orig_mapping = {v: k for k, v in self.orig_ir_mapping.items()}

        self.net_inputs_mapping = OrderedDict({})
        self.net_outputs_mapping = OrderedDict({})
        self.configure_inputs(required_inputs)
        self.configure_outputs(required_outputs)

        if 'CPU' in device:
            self.check_cpu_support(ie, self.net)

        logging.info('Loading network to plugin...')
        self.max_num_requests = max_num_requests
        self.exec_net = ie.load_network(network=self.net,
                                        device_name=device,
                                        num_requests=max_num_requests)

        self.perf_counters = None
        if collect_perf_counters:
            self.perf_counters = PerformanceCounters()

        self.pt_model = None
        if cfg is not None:
            self.pt_model = build_detector(cfg.model,
                                           train_cfg=None,
                                           test_cfg=cfg.test_cfg)
            if classes is not None:
                self.pt_model.CLASSES = classes

    @staticmethod
    def check_cpu_support(ie, net):
        logging.info('Check that all layers are supported...')
        supported_layers = ie.query_network(net, 'CPU')
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            unsupported_info = '\n\t'.join(
                '{} ({} with params {})'.format(
                    layer_id, net.layers[layer_id].type,
                    str(net.layers[layer_id].params))
                for layer_id in not_supported_layers)
            logging.warning(
                'Following layers are not supported '
                'by the CPU plugin:\n\t{}'.format(unsupported_info))
            logging.warning(
                'Please try to specify cpu extensions library path.')
            raise ValueError('Some of the layers are not supported.')

    def get_mapping(self, mapping_file_path=None):
        mapping = {}
        if mapping_file_path is not None:
            logging.info('Loading mapping file...')
            root = etree.parse(mapping_file_path).getroot()
            for m in root:
                if m.tag != 'map':
                    continue
                framework = m.find('framework')
                ir = m.find('IR')
                framework_name = framework.get('name')
                ir_name = ir.get('name')
                mapping[framework_name] = ir_name
                if framework_name != ir_name:
                    # FIXME. This may not be correct for all operations.
                    mapping[framework_name] += '.0'
        return mapping

    def try_add_extra_outputs(self, extra_outputs):
        if extra_outputs is None:
            return
        for extra_output in extra_outputs:
            if extra_output not in self.orig_ir_mapping:
                continue
            ir_name = self.orig_ir_mapping[extra_output]
            try:
                self.net.add_outputs(ir_name)
            except RuntimeError:
                pass

    def configure_inputs(self, required):
        self.net_inputs_mapping = OrderedDict(
            (i, i) for i in self.net.inputs.keys())
        self.check_required(self.net_inputs_mapping.keys(), required)

    def configure_outputs(self, required):
        self.try_add_extra_outputs(required)
        self.net_outputs_mapping = OrderedDict(
            (i, self.ir_orig_mapping[i]) for i in self.net.outputs.keys())
        self.check_required(self.orig_ir_mapping.keys(), required)

    def set_outputs(self, outputs):
        self.check_required(self.orig_ir_mapping.keys(), outputs)
        self.net_outputs_mapping = OrderedDict(
            (self.orig_ir_mapping[i], i) for i in outputs)

    @staticmethod
    def check_required(available, required):
        if required is None:
            return
        for x in required:
            if x not in available:
                raise ValueError(
                    f'Failed to identify data blob with name "{x}"')

    def rename_outputs(self, outputs):
        return {
            self.net_outputs_mapping[k]: v
            for k, v in outputs.items() if k in self.net_outputs_mapping
        }

    def unify_inputs(self, inputs):
        if not isinstance(inputs, dict):
            if len(self.net_inputs_mapping) == 1 and not isinstance(
                    inputs, (list, tuple)):
                inputs = [inputs]
            inputs = {
                k: v
                for (k, _), v in zip(self.net_inputs_mapping.items(), inputs)
            }
        inputs = {self.net_inputs_mapping[k]: v for k, v in inputs.items()}
        return inputs

    def __call__(self, inputs):
        inputs = self.unify_inputs(inputs)
        outputs = self.exec_net.infer(inputs)
        if self.perf_counters:
            perf_counters = self.exec_net.requests[0].get_perf_counts()
            self.perf_counters.update(perf_counters)
        return self.rename_outputs(outputs)

    def print_performance_counters(self):
        if self.perf_counters:
            self.perf_counters.print()

    def show(self, data, result, dataset=None, score_thr=0.3, wait_time=0):
        if self.pt_model is not None:
            self.pt_model.show_result(data,
                                      result,
                                      dataset=dataset,
                                      score_thr=score_thr,
                                      wait_time=wait_time)
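A hedged usage sketch for `ModelOpenVINO`; the file name, and the `preprocessed_blob` input, are illustrative only:

# Load an IR, run one inference, and dump per-layer timings.
model = ModelOpenVINO('detector.xml', device='CPU', collect_perf_counters=True)
outputs = model(preprocessed_blob)  # a single array works for one-input nets
model.print_performance_counters()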
Example No. 25
from openvino.inference_engine import IENetwork, IECore

import numpy as np
import time

# Loading model
model_path = 'sep_cnn/sep_cnn'
model_weights = model_path + '.bin'
model_structure = model_path + '.xml'

# TODO: Load the model
model = IENetwork(model_structure, model_weights)

core = IECore()
net = core.load_network(network=model, device_name='CPU', num_requests=1)
print('Model loaded successfully')

input_name = next(iter(model.inputs))

# Reading and Preprocessing Image
input_img = np.load('image.npy')
input_img = input_img.reshape(1, 28, 28)

input_dict = {input_name: input_img}

# TODO: Using the input image, run inference on the model for 10 iterations
start = time.time()
for _ in range(10):
    net.infer(input_dict)

# Print the average inference time per run over the 10 iterations
print("Average inference time: {:.2f} ms".format((time.time() - start) / 10 * 1000))
Example No. 26
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)

    # Configure plugin to support dynamic batch size
    plugin.set_config({"DYN_BATCH_ENABLED": "YES"})

    # Load cpu_extensions library if specified
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    # Check for unsupported layers if the device is 'CPU'
    if plugin.device == "CPU":
        unsupported_layers = [layer for layer in net.layers if layer not in plugin.get_supported_layers(net)]
        if len(unsupported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(unsupported_layers)))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))

    # Set max batch size
    inputs_count = len(args.input)
    if args.max_batch < inputs_count:
        log.warning("Defined max_batch size {} less than input images count {}."
                    "\n\t\t\tInput images count will be used as max batch size".format(args.max_batch, inputs_count))
    net.batch_size = max(args.max_batch, inputs_count)

    # Create numpy array for the max_batch size images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.zeros(shape=(n, c, h, w))

    # Read and pre-process input images
    for i in range(inputs_count):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    log.info("Batch size is {}".format(n))

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)

    def infer():
        for i in range(args.number_iter):
            t0 = time()
            exec_net.infer(inputs={input_blob: images})
            infer_time.append((time() - t0) * 1000)
        log.info("Average running time of one iteration: {} ms".format(np.average(np.asarray(infer_time))))
        if args.perf_counts:
            perf_counts = exec_net.requests[0].get_perf_counts()
            log.info("Performance counters:")
            print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status',
                                                              'real_time, us'))
            for layer, stats in perf_counts.items():
                print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
                                                                  stats['status'], stats['real_time']))

    # Start sync inference with full batch size
    log.info(
        "Starting inference with full batch {} ({} iterations)".format(n, args.number_iter)
    )
    infer_time = []
    infer()

    # Set batch size dynamically for the infer request and start sync inference
    infer_time = []
    exec_net.requests[0].set_batch(inputs_count)
    log.info("Starting inference with dynamically defined batch {} for the 2nd infer request ({} iterations)".format(
        inputs_count, args.number_iter))
    infer()
Example No. 27
def get_input_shape(model):
    """GIven a model, returns its input shape"""
    model_bin = model[:-3] + "bin"
    net = IENetwork(model=model, weights=model_bin)
    input_blob = next(iter(net.inputs))
    return net.inputs[input_blob].shape
Example No. 28
    def load_model(self):
        # Read the IR, create the core, and load the network onto the target
        # device (args.device is assumed from the surrounding script, as in
        # the original commented-out lines).
        self.model = IENetwork(self.model_structure, self.model_weights)
        self.core = IECore()
        self.net = self.core.load_network(network=self.model, device_name=args.device, num_requests=1)
Example No. 29
                  type=str,
                  default="MYRIAD",
                  help="Default MYRIAD or CPU")
args = args.parse_args()

input_image_size = (300, 300)

data_type = "FP16"
if args.device == "CPU": data_type = "FP32"

#STEP-2
model_xml = 'tfnet/' + data_type + '/yolov2-voc.xml'
model_bin = 'tfnet/' + data_type + '/yolov2-voc.bin'
model_xml = os.environ['HOME'] + "/" + model_xml
model_bin = os.environ['HOME'] + "/" + model_bin
net = IENetwork(model=model_xml, weights=model_bin)

#STEP-3
print(model_bin, "on", args.device)
plugin = IEPlugin(device=args.device, plugin_dirs=None)
if args.device == "CPU":
    HOME = os.environ['HOME']
    PATHLIBEXTENSION = os.getenv(
        "PATHLIBEXTENSION", HOME +
        "/inference_engine_samples_build/intel64/Release/lib/libcpu_extension.so"
    )
    plugin.add_cpu_extension(PATHLIBEXTENSION)

exec_net = plugin.load(network=net, num_requests=1)

#STEP-4
Example No. 30
                         action='store_true', default=False)
    return parser


if __name__ == '__main__':
    args = build_arg().parse_args()
    model_path = os.path.splitext(args.model)[0]
    weights_bin = model_path + ".bin"
    coeffs = args.coeffs

    # mean is stored in the source caffe model and passed to IR
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO if not args.verbose else log.DEBUG, stream=sys.stdout)

    log.debug("Load network")
    load_net = IENetwork(model=args.model, weights=weights_bin)
    load_net.batch_size = 1
    exec_net = IECore().load_network(network=load_net, device_name=args.device)

    assert len(load_net.inputs) == 1, "Expected number of inputs is equal 1"
    input_blob = next(iter(load_net.inputs))
    input_shape = load_net.inputs[input_blob].shape
    assert input_shape[1] == 1, "Expected model input shape with 1 channel"

    assert len(load_net.outputs) == 1, "Expected number of outputs is equal 1"
    output_blob = next(iter(load_net.outputs))
    output_shape = load_net.outputs[output_blob].shape
    assert output_shape == [1, 313, 56, 56], "Shape of outputs does not match network shape outputs"

    _, _, h_in, w_in = input_shape
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    ie = IECore()
    #plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        log.info("Loading plugins for {} device...".format(args.device))
        #ie.add_extension(args.cpu_extension, args.device) // openvino2019-R1
        ie.add_extension(args.cpu_extension, "CPU")

    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(args.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    if args.input == 'cam':
        input_stream = 0
        out_file_name = 'cam'
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
        out_file_name = os.path.splitext(os.path.basename(args.input))[0]

    log.info("Loading IR to the plugin...")
    exec_net = ie.load_network(network=net,
                               num_requests=args.number_infer_requests,
                               device_name=args.device)

    log.info(
        "Starting inference in async mode, {} requests in parallel...".format(
            args.number_infer_requests))
    job_id = str(os.environ['PBS_JOBID'])
    result_file = open(
        os.path.join(args.output_dir, 'output_' + job_id + '.txt'), "w")
    pre_infer_file = os.path.join(args.output_dir,
                                  'pre_progress_' + job_id + '.txt')
    infer_file = os.path.join(args.output_dir, 'i_progress_' + job_id + '.txt')
    processed_vid = '/tmp/processed_vid.bin'

    # Read and pre-process input image
    if isinstance(net.inputs[input_blob], list):
        n, c, h, w = net.inputs[input_blob]
    else:
        n, c, h, w = net.inputs[input_blob].shape
    del net

    cap = cv2.VideoCapture(input_stream)
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    if video_len < args.number_infer_requests:
        args.number_infer_requests = video_len
    # Pre-inference processing: read the video frame by frame, preprocess with
    # OpenCV and write the result to a binary file
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    CHUNKSIZE = n * c * w * h  # one preprocessed frame, as uint8 bytes
    id_ = 0
    with open(processed_vid, 'w+b') as f:
        time_start = time.time()
        while cap.isOpened():
            ret, next_frame = cap.read()
            if not ret:
                break
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            bin_frame = bytearray(in_frame)
            f.write(bin_frame)
            id_ += 1
            if id_ % 10 == 0:
                progressUpdate(pre_infer_file,
                               time.time() - time_start, id_, video_len)
    cap.release()

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    current_inference = 0
    previous_inference = 1 - args.number_infer_requests
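    # Seeding previous_inference negative means the first (number_infer_requests - 1)
    # iterations only submit frames; results are consumed once the pipeline is full.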
    infer_requests = exec_net.requests
    frame_count = 0

    try:
        infer_time_start = time.time()
        with open(processed_vid, "rb") as data:
            while frame_count < video_len:
                # Read next frame from input stream if available and submit it for inference
                byte = data.read(CHUNKSIZE)
                if byte != b"":
                    deserialized_bytes = np.frombuffer(byte, dtype=np.uint8)
                    in_frame = np.reshape(deserialized_bytes,
                                          newshape=(n, c, h, w))
                    exec_net.start_async(request_id=current_inference,
                                         inputs={input_blob: in_frame})

                # Retrieve the output of an earlier inference request
                if previous_inference >= 0:
                    status = infer_requests[previous_inference].wait()
                    if status != 0:
                        raise Exception(
                            "Infer request not completed successfully")
                    # Parse inference results
                    res = infer_requests[previous_inference].outputs[out_blob]
                    processBoxes(frame_count, res, labels_map,
                                 args.prob_threshold, width, height,
                                 result_file)
                    frame_count += 1

                # Write data to progress tracker
                if frame_count % 10 == 0:
                    progressUpdate(infer_file,
                                   time.time() - infer_time_start,
                                   frame_count + 1, video_len + 1)

                # Increment counter for the inference queue and roll them over if necessary
                current_inference += 1
                if current_inference >= args.number_infer_requests:
                    current_inference = 0

                previous_inference += 1
                if previous_inference >= args.number_infer_requests:
                    previous_inference = 0

        # End while loop
        total_time = time.time() - infer_time_start
        with open(os.path.join(args.output_dir, 'stats_{}.txt'.format(job_id)),
                  'w') as f:
            f.write('{:.3g} \n'.format(total_time))
            f.write('{} \n'.format(frame_count))

        result_file.close()

    finally:
        log.info("Processing done...")
        del exec_net
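This example relies on a progressUpdate helper that is not shown. A minimal sketch of what it plausibly does, assuming the progress files just hold percent complete and elapsed seconds (the file format is an assumption):

def progressUpdate(file_path, elapsed_time, current, total):
    # Hypothetical helper: overwrite the progress file so an external
    # dashboard can poll completion percentage and elapsed time.
    with open(file_path, "w") as f:
        f.write("{:.2f} {:.2f}\n".format(100.0 * current / total, elapsed_time))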
Example No. 32
import argparse

import cv2 as cv
from openvino.inference_engine import IENetwork

parser = argparse.ArgumentParser(
    description='Run video super resolution with OpenVINO')
parser.add_argument('-i', dest='input', help='Path to input video')
parser.add_argument('-m',
                    dest='model',
                    default='single-image-super-resolution-1033',
                    help='Path to the model')
parser.add_argument('-o', dest='output', help='Path to output')

args = parser.parse_args()

# Setup network
net = IENetwork(args.model + '.xml', args.model + '.bin')

# Read a video stream from file
cap = cv.VideoCapture(args.input)
inp_w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
inp_h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv.CAP_PROP_FPS))

out_h, out_w = inp_h * 3, inp_w * 3  # Do not change: the model upscales by exactly 3x

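# Patch the spatial sizes baked into the graph's const layers so the network
# accepts this clip's resolution (the layer names are specific to this IR)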
c1 = net.layers['79/Cast_11815_const']
c1.blobs['custom'][4] = inp_h
c1.blobs['custom'][5] = inp_w

c2 = net.layers['86/Cast_11811_const']
c2.blobs['custom'][2] = out_h