def run_ie_on_dataset(model_xml, model_bin, cpu_extension_path, images_dir, prob_threshold=0.01):
    plugin = IEPlugin(device='CPU')
    plugin.add_cpu_extension(cpu_extension_path)
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    exec_net = plugin.load(network=net, num_requests=2)
    num, chs, height, width = net.inputs[input_blob]  # old from_ir API: inputs maps names to shape lists
    del net
    cur_request_id = 0

    detection_data = []
    for image in os.listdir(images_dir):
      im_path = os.path.join(images_dir, image)
      frame = cv2.imread(im_path)
      initial_h, initial_w, _ = frame.shape
      in_frame = cv2.resize(frame, (width, height))
      in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
      in_frame = in_frame.reshape((num, chs, height, width))

      objects_per_image = []
      exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})

      if exec_net.requests[cur_request_id].wait(-1) == 0:
        res = exec_net.requests[cur_request_id].outputs[out_blob]
        for obj in res[0][0]:
          if obj[2] > prob_threshold:
            xmin = int(obj[3] * initial_w)
            ymin = int(obj[4] * initial_h)
            xmax = int(obj[5] * initial_w)
            ymax = int(obj[6] * initial_h)
            class_id = int(obj[1])
            conf = obj[2]
            objects_per_image.append({'bbox': [xmin, ymin, xmax, ymax], 'class_id': class_id, 'score': conf})

      det_item = {'image': im_path, 'objects': objects_per_image}
      detection_data.append(det_item)

    del exec_net
    del plugin

    return detection_data
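
# A minimal usage sketch (all paths below are hypothetical placeholders,
# not part of the original sample):
#
#   detections = run_ie_on_dataset(
#       model_xml='model/detector.xml',
#       model_bin='model/detector.bin',
#       cpu_extension_path='lib/libcpu_extension_sse4.so',
#       images_dir='images/',
#       prob_threshold=0.5)
#   for item in detections:
#       print(item['image'], len(item['objects']), 'objects found')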
Example No. 2
def main():

    job_id = os.environ['PBS_JOBID']
    job_id = job_id.rstrip().split('.')[0]
    codec = data_utils.TextFeatureIO(char_dict_path='Config/char_dict.json',
                                     ord_map_dict_path=r'Config/ord_map.json')
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    # if plugin.device == "CPU":
    # supported_layers = plugin.get_supported_layers(net)
    # not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    # if len(not_supported_layers) != 0:
    # log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
    # format(plugin.device, ', '.join(not_supported_layers)))
    # log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
    # "or --cpu_extension command line argument")
    # sys.exit(1)

    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(args.input)

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(
                args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    log.info("Batch size is {}".format(n))

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)
    del net

    # Start sync inference
    log.info("Starting inference ({} iterations)".format(args.number_iter))
    infer_time = []

    result_dir = os.path.join(args.output_dir, job_id)

    if not os.path.isdir(result_dir):
        print(result_dir)
        os.makedirs(result_dir, exist_ok=True)
    infer_file = os.path.join(result_dir, 'i_progress.txt')
    t0 = time.time()
    for i in range(args.number_iter):
        iter_start = time.time()
        res = exec_net.infer(inputs={input_blob: images})
        infer_time.append((time.time() - iter_start) * 1000)
        if i % 10 == 0 or i == args.number_iter - 1:
            progressUpdate(infer_file,
                           time.time() - t0, i + 1, args.number_iter)
    t1 = (time.time() - t0)
    log.info("Average running time of one iteration: {} ms".format(
        np.average(np.asarray(infer_time))))
    if args.perf_counts:
        perf_counts = exec_net.requests[0].get_perf_counts()
        log.info("Performance counters:")
        print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
            'name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
        for layer, stats in perf_counts.items():
            print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
                layer, stats['layer_type'], stats['exec_type'],
                stats['status'], stats['real_time']))

    # Processing output blob
    log.info("Processing output blob")
    res = res[out_blob]

    preds = res.argmax(2)
    preds = preds.transpose(1, 0)
    preds = np.ascontiguousarray(preds, dtype=np.int8)
    values = codec.writer.ordtochar(preds[0].tolist())
    values = [v for i, v in enumerate(values) if i == 0 or v != values[i - 1]]
    values = [x for x in values if x != ' ']
    res = ''.join(values)
    print("The result is : " + res)

    avg_time = round((t1 * 1000 / args.number_iter), 3)
    with open(os.path.join(args.output_dir, job_id, 'result.txt'), 'w') as f:
        f.write(res + "\nInference performed in " + str(avg_time) + " ms")

    stats = {}
    stats['time'] = str(round(t1, 2))
    stats['frames'] = str(args.number_iter * n)
    stats['fps'] = str(round(args.number_iter * n / t1, 2))
    stats_file = result_dir + "/stats.json"
    with open(stats_file, 'w') as f:
        json.dump(stats, f)

    del exec_net
    del plugin
def prepare_model(log, model, weights, cpu_extension, device_list, plugin_dir,
                  thread_num, stream_num):
    model_xml = model
    model_bin = weights
    if len(device_list) == 1:
        device = device_list[0]
    elif len(device_list) == 2:
        device = 'HETERO:{},{}'.format(device_list[0], device_list[1])
    else:
        log.error('Wrong number of devices')
        sys.exit(1)
    log.info('Plugin initialization.')
    plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
    if cpu_extension and 'CPU' in device:
        plugin.add_cpu_extension(cpu_extension)
    log.info('Loading network files:\n\t {0}\n\t {1}'.format(
        model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)
    if plugin.device == 'CPU':
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [ l for l in net.layers.keys() \
            if l not in supported_layers ]
        if len(not_supported_layers) != 0:
            log.error('Following layers are not supported by the plugin '
                'for specified device {0}:\n {1}'.format(plugin.device,
                ', '.join(not_supported_layers)))
            log.error('Please try to specify cpu extensions library path in '
                "sample's command line parameters using -l or --cpu_extension "
                'command line argument')
            sys.exit(1)
    if thread_num is not None:
        if 'CPU' in device_list:
            plugin.set_config({'CPU_THREADS_NUM': str(thread_num)})
        else:
            log.error('The number of threads parameter is supported only on CPU')
            sys.exit(1)
    if stream_num is not None:
        if 'CPU' in device_list:
            plugin.set_config({'CPU_THROUGHPUT_STREAMS': str(stream_num)})
        else:
            log.error('The number of streams parameter is supported only on CPU')
            sys.exit(1)
    if len(device_list) == 2:
        plugin.set_config({'TARGET_FALLBACK': device})
        plugin.set_initial_affinity(net)
    return net, plugin
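
# A minimal sketch of driving prepare_model (device and tuning values are
# illustrative assumptions):
#
#   net, plugin = prepare_model(log, 'model.xml', 'model.bin', None,
#                               ['CPU'], None, thread_num=4, stream_num=None)
#   exec_net = plugin.load(network=net, num_requests=2)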
class Network:
    """
    Load and configure inference plugins for the specified target devices 
    and performs synchronous and asynchronous modes for the specified infer requests.
    """

    def __init__(self):
        ### TODO: Initialize any class variables desired ###
        self.network = None
        self.plugin = None
        self.input_blob = None
        self.output_blob = None
        self.exec_network = None
        self.infer_request_handle = None

    def load_model(self, model_xml, device, input_size, output_size, num_requests, cpu_extension=None, plugin=None):
        model_bin = os.path.splitext(model_xml)[0] + '.bin'
        
        if not plugin:
            log.info("Initializing plugin for {} device...".format(device))
            self.plugin = IEPlugin(device=device)
        else:
            self.plugin = plugin
        
        ### TODO: Load the model ###
        # Add Extensions
        if cpu_extension and 'CPU' in device:
            self.plugin.add_cpu_extension(cpu_extension)
        
        # Read IR
        log.info("Reading the Intermediate Representative(IR). . .")
        self.network = IENetwork(model=model_xml, weights=model_bin)
        log.info("Loading IR to the plugin. . .")
        
        if self.plugin.device == "CPU":
            supported_layers = self.plugin.get_supported_layers(self.network)
            unsupported_layers = [l for l in self.network.layers.keys() if l not in supported_layers]

            if len(unsupported_layers) != 0:
                log.error("The following layers are not supported by the plugin for specified device {}:\n {}".
                          format(self.plugin.device, ', '.join(unsupported_layers)))
                log.error("Please specify cpu extensions library path in command line parameters using -l "
                          "or --cpu_extension command line argument")
                sys.exit(1)
        
        # Loads network read from IR to the plugin        
        if num_requests == 0:
            self.exec_network = self.plugin.load(network=self.network)
        else:
            self.exec_network = self.plugin.load(network=self.network, num_requests=num_requests)
        return self.plugin, self.get_input_shape()

    def get_input_shape(self):
        ### TODO: Return the shape of the input layer ###
        self.input_blob = next(iter(self.network.inputs))
        self.output_blob = next(iter(self.network.outputs))
        
        return self.network.inputs[self.input_blob].shape
    
    def exec_net(self, request_id, frame):
        self.infer_request_handle = self.exec_network.start_async(request_id=request_id, inputs={self.input_blob: frame})
        
        return self.exec_network
    

    def wait(self, request_id):
        status = self.exec_network.requests[request_id].wait(-1)
        
        return status

    def get_output(self, request_id, output=None):
        if output is not None:
            result = self.infer_request_handle.outputs[output]
        else:
            result = self.exec_network.requests[request_id].outputs[self.output_blob]
        
        return result
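
# A minimal sketch of the Network wrapper in use (model path and frame are
# hypothetical placeholders; preprocessing mirrors the samples above):
#
#   net = Network()
#   plugin, (n, c, h, w) = net.load_model('model.xml', 'CPU', 1, 1,
#                                         num_requests=2)
#   blob = cv2.resize(frame, (w, h)).transpose((2, 0, 1)).reshape((n, c, h, w))
#   net.exec_net(0, blob)
#   if net.wait(0) == 0:
#       result = net.get_output(0)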
Example No. 5
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(args.input)

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        original_h = image.shape[0]
        original_w = image.shape[1]
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(
                args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        image = image / 255.
        images[i] = image
    log.info("Batch size is {}".format(n))

    print("@@@@@@")
    print(images[0].shape)  # e.g. (1, 224, 224) for a single-channel model
    print(images[0])
    print("@@@@@@")

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)
    del net

    # Start sync inference
    log.info("Starting inference ({} iterations)".format(args.number_iter))
    infer_time = []
    for i in range(args.number_iter):
        t0 = time()
        res = exec_net.infer(inputs={input_blob: images})
        infer_time.append((time() - t0) * 1000)
    log.info("Average running time of one iteration: {} ms".format(
        np.average(np.asarray(infer_time))))
    if args.perf_counts:
        perf_counts = exec_net.requests[0].get_perf_counts()
        log.info("Performance counters:")
        print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
            'name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
        for layer, stats in perf_counts.items():
            print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
                layer, stats['layer_type'], stats['exec_type'],
                stats['status'], stats['real_time']))

    # Processing output blob
    log.info("Processing output blob")
    res = res[out_blob]

    #np.set_printoptions(threshold=np.inf)
    print("@@@@@@")
    print(res.shape)  # (1, 1, 224, 224)
    print(res)
    print("@@@@@@")

    res = np.squeeze(res)  # np.squeeze drops all singleton axes: (224, 224)

    result = sigmoid(res)
    print("@@@@@@")
    print(result.shape)
    print(result)
    print("@@@@@@")

    result = cv2.resize(result, (original_w, original_h))  ## to 960x540
    cv2.imwrite('out_openvino.png', (result * 255).astype(np.uint8))

    del exec_net
    del plugin
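
# sigmoid() is referenced above but not defined in this listing; a standard
# NumPy implementation (an assumption about the helper):
def sigmoid(x):
    # Elementwise logistic function, mapping values into (0, 1).
    return 1.0 / (1.0 + np.exp(-x))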
Example No. 6
def infer_on_video(args):
    args.ct = float(args.ct)

    ### TODO: Initialize the Inference Engine
    # plugin = Network()
    # ### TODO: Load the network model into the IE
    # plugin.load_model(args.m, args.d, CPU_EXTENSION)
    # net_input_shape = plugin.get_input_shape()

    model_xml = args.m + '.xml'
    model_bin = args.m + ".bin"

    plugin = IEPlugin(device=args.d)
    if "CPU" in args.d:
        plugin.add_cpu_extension("lib/libcpu_extension.dylib")

    net = IENetwork(model=model_xml, weights=model_bin)
    input_blob = next(iter(net.inputs))
    exec_net = plugin.load(network=net)

    print("IR Sucessfully loaded into the Inference Engine")

    camera_width = 640
    camera_height = 480
    fps = ""
    framepos = 0
    frame_count = 0
    vidfps = 0
    skip_frame = 0
    elapsedTime = 0
    # Note: both expressions below reduce to m_input_size, so the frame is
    # squashed to a square rather than letterboxed with its aspect ratio kept.
    new_w = int(camera_width * m_input_size/camera_width)
    new_h = int(camera_height * m_input_size/camera_height)

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FPS, 30)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)

    time.sleep(1)

    while cap.isOpened():
        t1 = time.time()

        ## Uncomment only when playing video files
        #cap.set(cv2.CAP_PROP_POS_FRAMES, framepos)

        ret, image = cap.read()
        if not ret:
            break

        resized_image = cv2.resize(image, (new_w, new_h), interpolation = cv2.INTER_CUBIC)
        canvas = np.full((m_input_size, m_input_size, 3), 128, dtype=np.uint8)  # gray padding canvas
        canvas[(m_input_size-new_h)//2:(m_input_size-new_h)//2 + new_h,(m_input_size-new_w)//2:(m_input_size-new_w)//2 + new_w,  :] = resized_image
        prepimg = canvas
        prepimg = prepimg[np.newaxis, :, :, :]     # Batch size axis add
        prepimg = prepimg.transpose((0, 3, 1, 2))  # NHWC to NCHW
        outputs = exec_net.infer(inputs={input_blob: prepimg})


        objects = []

        for output in outputs.values():
            objects = ParseYOLOV3Output(output, new_h, new_w, camera_height, camera_width, 0.4, objects)

        # Filtering overlapping boxes
        objlen = len(objects)
        for i in range(objlen):
            if (objects[i].confidence == 0.0):
                continue
            for j in range(i + 1, objlen):
                if (IntersectionOverUnion(objects[i], objects[j]) >= 0.4):
                    if objects[i].confidence < objects[j].confidence:
                        objects[i], objects[j] = objects[j], objects[i]
                    objects[j].confidence = 0.0

        # Drawing boxes
        for obj in objects:
            if obj.confidence < args.ct:
                continue
            label = obj.class_id
            confidence = obj.confidence
            #if confidence >= 0.2:
            label_text = LABELS[label] + " (" + "{:.1f}".format(confidence * 100) + "%)"
            if LABELS[label] in danger:
                mixer.music.play()
            cv2.rectangle(image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax), box_color, box_thickness)
            cv2.putText(image, label_text, (obj.xmin, obj.ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, label_text_color, 1)

        cv2.putText(image, fps, (camera_width - 170, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38, 0, 255), 1, cv2.LINE_AA)
        cv2.imshow("Result", image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        elapsedTime = time.time() - t1
        fps = "(Playback) {:.1f} FPS".format(1/elapsedTime)

        ## frame skip, video file only
        #skip_frame = int((vidfps - int(1/elapsedTime)) / int(1/elapsedTime))
        #framepos += skip_frame

    cv2.destroyAllWindows()
    del net
    del exec_net
    del plugin
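
# IntersectionOverUnion() and ParseYOLOV3Output() are assumed helpers in this
# listing; a conventional box-IoU sketch, assuming objects carry
# xmin/ymin/xmax/ymax attributes:
def IntersectionOverUnion(box_1, box_2):
    # Overlap area of two axis-aligned boxes divided by the area of their union.
    inter_w = min(box_1.xmax, box_2.xmax) - max(box_1.xmin, box_2.xmin)
    inter_h = min(box_1.ymax, box_2.ymax) - max(box_1.ymin, box_2.ymin)
    if inter_w <= 0 or inter_h <= 0:
        return 0.0
    inter = inter_w * inter_h
    union = ((box_1.xmax - box_1.xmin) * (box_1.ymax - box_1.ymin)
             + (box_2.xmax - box_2.xmin) * (box_2.ymax - box_2.ymin)
             - inter)
    return inter / union if union > 0 else 0.0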
Example No. 7
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    preprocess_times = collections.deque()
    infer_times = collections.deque()
    postprocess_times = collections.deque()

    ROIfile = open("ROIs.txt", "w")
    # output stored here, view with ROIviewer

    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in demo's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    #Set Batch Size
    batchSize = args.b
    frameLimit = args.fr
    assert len(
        net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)

    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    output_dims = net.outputs[out_blob].shape
    infer_width = w
    infer_height = h
    num_channels = c
    channel_size = infer_width * infer_height
    full_image_size = channel_size * num_channels

    print("inputdims=", w, h, c, n)
    print("outputdims=", output_dims[3], output_dims[2], output_dims[1],
          output_dims[0])
    if int(output_dims[3]) > 1:
        print("SSD Mode")
        output_mode = output_mode_type.SSD_MODE
    else:
        print("Single Classification Mode")
        output_mode = output_mode_type.CLASSIFICATION_MODE
        output_data_size = int(output_dims[2]) * int(output_dims[1]) * int(
            output_dims[0])
    del net
    if args.input == 'cam':
        input_stream = 0
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cap = cv2.VideoCapture(input_stream)

    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    is_async_mode = True
    render_time = 0

    framenum = 0
    process_more_frames = True
    frames_in_output = batchSize

    while process_more_frames:
        time1 = time.time()
        for mb in range(0, batchSize):
            ret, frame = cap.read()
            if not ret or (framenum >= frameLimit):
                process_more_frames = False
                frames_in_output = mb
                break

            # convert image to blob
            # Fill input tensor with planes. First b channel, then g and r channels
            # Note: in_frame is rebuilt on every pass of this loop, so only the
            # last frame of the mini-batch is kept; the reshape below only
            # works when the model batch size n is 1.
            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))

        time2 = time.time()
        diffPreProcess = time2 - time1
        if process_more_frames:
            preprocess_times.append(diffPreProcess * 1000)

        # Main sync point:
        # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
        # in the regular mode we start the CURRENT request and immediately wait for its completion
        inf_start = time.time()
        if is_async_mode:
            exec_net.start_async(request_id=next_request_id,
                                 inputs={input_blob: in_frame})
        else:
            exec_net.start_async(request_id=cur_request_id,
                                 inputs={input_blob: in_frame})
        if exec_net.requests[cur_request_id].wait(-1) == 0:
            inf_end = time.time()
            det_time = inf_end - inf_start
            infer_times.append(det_time * 1000)
            time1 = time.time()

            # Parse detection results of the current request
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            for obj in res[0][0]:
                # Write into ROIs.txt only objects when probability more than specified threshold
                if obj[2] > args.prob_threshold:
                    confidence = obj[2]
                    locallabel = obj[1] - 1
                    print(str(0),
                          str(framenum),
                          str(locallabel),
                          str(confidence),
                          str(obj[3]),
                          str(obj[4]),
                          str(obj[5]),
                          str(obj[6]),
                          file=ROIfile)

        sys.stdout.write("\rframenum:" + str(framenum))
        sys.stdout.flush()
        render_start = time.time()
        framenum = framenum + 1
        time2 = time.time()
        diffPostProcess = time2 - time1
        postprocess_times.append(diffPostProcess * 1000)

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id

    print("\n")
    preprocesstime = 0
    inferencetime = 0
    postprocesstime = 0

    for obj in preprocess_times:
        preprocesstime += obj
    for obj in infer_times:
        inferencetime += obj
    for obj in postprocess_times:
        postprocesstime += obj

    print("Preprocess: ", preprocesstime / (len(preprocess_times) * batchSize),
          "\tms/frame")
    print("Inference:  ", inferencetime / (len(infer_times) * batchSize),
          "\tms/frame")
    print("Postprocess:",
          postprocesstime / (len(postprocess_times) * batchSize), "\tms/frame")

    del exec_net
    del plugin
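
# The async ping-pong used above, in isolation (a minimal sketch; assumes
# exec_net was loaded with num_requests=2 and frames yields preprocessed
# NCHW blobs):
#
#   cur_id, next_id = 0, 1
#   for blob in frames:
#       exec_net.start_async(request_id=next_id, inputs={input_blob: blob})
#       if exec_net.requests[cur_id].wait(-1) == 0:
#           res = exec_net.requests[cur_id].outputs[out_blob]
#       cur_id, next_id = next_id, cur_id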
Example No. 8
class DetectorOpenVINO(detector_base.DetectorBase):
    def __init__(self, dyda_config_path='', debug=False):
        """ __init__ of DetectorOpenVINO

        Trainer Variables:
            input_data: a list of image array
            results: defined by lab_tools.output_pred_detection()

        Arguments:
            dyda_config_path -- Trainer config filepath
        """
        if debug:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)

        # Setup dyda config
        super(DetectorOpenVINO,
              self).__init__(dyda_config_path=dyda_config_path)
        self.set_param(self.class_name)

        self.check_param_keys()

        if "threshold" in self.param.keys():
            self.threshold = self.param["threshold"]
        else:
            self.threshold = 0.3

        # Setup DL model
        model_xml = self.param['model_description']
        model_bin = self.param['model_file']
        with open(self.param['label_file'], 'r') as f:
            self.labels_map = [x.strip() for x in f]

        # Setup OpenVINO
        #
        # Plugin initialization for specified device and
        # load extensions library if specified
        #
        # Note: MKLDNN CPU-targeted custom layer support is not included
        #       because we do not use it yet.
        self.plugin = IEPlugin(device=self.param['device'],
                               plugin_dirs=self.param['plugin_dirs'])
        if self.param['device'] == 'CPU':
            for ext in self.param['cpu_extensions']:
                logger.info('Add cpu extension: {}'.format(ext))
                self.plugin.add_cpu_extension(ext)
        logger.debug("Computation device: {}".format(self.param['device']))

        # Read IR
        logger.debug("Loading network files:\n\t{}\n\t{}".format(
            model_xml, model_bin))
        net = IENetwork(model=model_xml, weights=model_bin)

        if self.plugin.device == "CPU":
            supported_layers = self.plugin.get_supported_layers(net)
            not_supported_layers = [
                l for l in net.layers.keys() if l not in supported_layers
            ]
            if len(not_supported_layers) != 0:
                logger.error(
                    ('Following layers are not supported '
                     'by the plugin for specified device {}:\n {}').format(
                         self.plugin.device, ', '.join(not_supported_layers)))
                logger.error("Please try to specify cpu "
                             "extensions library path in demo's "
                             "command line parameters using -l "
                             "or --cpu_extension command line argument")
                sys.exit(1)

        assert len(net.inputs.keys()) == 1, (
            'Demo supports only single input topologies')
        assert len(
            net.outputs) == 1, ('Demo supports only single output topologies')

        # input_blob and out_blob are the layer names in string format.
        logger.debug("Preparing input blobs")
        self.input_blob = next(iter(net.inputs))
        self.out_blob = next(iter(net.outputs))

        self.n, self.c, self.h, self.w = net.inputs[self.input_blob].shape

        # Loading model to the plugin
        self.exec_net = self.plugin.load(network=net, num_requests=2)

        del net

        # Initialize engine mode: sync or async
        #
        # FIXME: async mode does not work currently.
        #        process_input needs to provide two input tensors for async.
        self.is_async_mode = False
        self.cur_request_id = 0
        self.next_request_id = 1

    def __delete__(self, instance):
        del self.exec_net
        del self.plugin

    # def create_labinfo(self, results):
    #     """Create DT42 labinfo based on def in spec"""
    #     orders = results.argsort()[::-1]
    #     labinfo = {'classifier': {}}
    #     for i in range(0, len(orders)):
    #         index = orders[i]
    #         labinfo['classifier'][self.labels[index]] = results[index]
    #     return orders, labinfo

    def check_param_keys(self):
        """
        Check if any default key is missing in the self.param.
        """
        default_keys = [
            "model_file", "model_description", "label_file", "device"
        ]
        for _key in default_keys:
            if _key not in self.param.keys():
                logger.error("%s missing in self.param" % _key)
                self.terminate_flag = True
            else:
                continue
        logger.debug("keys of self.param are checked")

    def process_input(self, tensor, next_tensor=None):
        frame = tensor
        next_frame = next_tensor

        # original input shape will be used in process_output
        self.img_w = tensor.shape[1]
        self.img_h = tensor.shape[0]

        # Main sync point:
        # in the truly Async mode we start the NEXT infer request,
        # while waiting for the CURRENT to complete
        # in the regular mode we start the CURRENT request and immediately
        # wait for its completion
        if self.is_async_mode:
            in_frame = cv2.resize(next_frame, (self.w, self.h))
            # Change data layout from HWC to CHW
            in_frame = in_frame.transpose((2, 0, 1))
            in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))
        else:
            in_frame = cv2.resize(frame, (self.w, self.h))
            # Change data layout from HWC to CHW
            in_frame = in_frame.transpose((2, 0, 1))
            in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))
        return in_frame

    def inference(self, tensor):
        inf_start = time()
        if self.is_async_mode:
            self.exec_net.start_async(request_id=self.next_request_id,
                                      inputs={self.input_blob: tensor})
        else:
            self.exec_net.start_async(request_id=self.cur_request_id,
                                      inputs={self.input_blob: tensor})

        if self.exec_net.requests[self.cur_request_id].wait(-1) == 0:
            inf_end = time()
            det_time = inf_end - inf_start
            if self.is_async_mode:
                logger.debug('Inference time: N/A for async mode')
            else:
                logger.debug("Inference time: {:.3f} ms".format(det_time *
                                                                1000))

            # Parse detection results of the current request
            res = self.exec_net.requests[self.cur_request_id].outputs[
                self.out_blob]
        else:
            res = None

        # FIXME: async mode does not work currently.
        #        process_input needs to provide two input tensors for async.
        if self.is_async_mode:
            self.cur_request_id, self.next_request_id = \
                self.next_request_id, self.cur_request_id

        return res

    def process_output(self, output):

        logger.debug("Processing output blob")
        logger.debug("Threshold: {} ".format(self.threshold))

        annotations = []

        for obj in output[0][0]:
            # Collect objects when probability more than specified threshold
            if obj[2] > self.threshold:
                xmin = int(obj[3] * self.img_w)
                ymin = int(obj[4] * self.img_h)
                xmax = int(obj[5] * self.img_w)
                ymax = int(obj[6] * self.img_h)
                class_id = int(obj[1])
                if self.labels_map:
                    det_label = self.labels_map[class_id]
                else:
                    det_label = str(class_id)
                annotations.append({
                    'label': det_label,
                    'confidence': float(obj[2]),
                    'left': xmin,
                    'top': ymin,
                    'right': xmax,
                    'bottom': ymax
                })
        logger.debug('process_output return: {}'.format(annotations))
        return annotations

    def main_process(self):
        if len(self.input_data) == 0:
            logger.error('no input_data found')
            self.terminate_flag = True

        logger.debug('self.input_data len: {}'.format(len(self.input_data)))
        for img_array in self.input_data:
            orig_h, orig_w = img_array.shape[:-1]
            img_array = self.process_input(img_array)
            inf_results = self.inference(img_array)
            det_results = self.process_output(inf_results)

            # This mapping looks redundant because the script was adapted
            # from berrynet's openvino_engine.py with as few changes as
            # possible: the annotation structure built in process_output
            # follows berrynet, so it is re-mapped here into the structure
            # that lab_tools expects.
            annotations = [[
                det_result['label'], det_result['confidence'],
                [
                    det_result['top'], det_result['bottom'],
                    det_result['left'], det_result['right']
                ]
            ] for det_result in det_results]

            # orders, labinfo = self.create_labinfo(inf_results)
            res = lab_tools.output_pred_detection(
                # input_path=self.orig_input_path,
                input_path='',
                annotations=annotations,
                img_size=(orig_h, orig_w),
                # labinfo=labinfo
                labinfo={})
            self.results.append(res)
        logger.debug('self.results: {}'.format(self.results))
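
# A minimal usage sketch for DetectorOpenVINO (the config path and image are
# hypothetical placeholders; required config keys follow check_param_keys()):
#
#   detector = DetectorOpenVINO(dyda_config_path='dyda.config')
#   detector.input_data = [cv2.imread('test.jpg')]
#   detector.main_process()
#   print(detector.results)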
Example No. 9
def main():

    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()

    # Plugin initialization for specified device and
    #     load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and "CPU" in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    # If using MYRIAD then we need to load FP16 model version
    model_xml, model_bin = load_model(args.device == "MYRIAD")

    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)
    # net = IENetwork.from_ir(model=model_xml, weights=model_bin) # Old API
    """
    This code checks to see if all of the graphs in the IR are
    compatible with OpenVINO. If not, then you'll need to probably
    try to load in an extension library from ${INTEL_CVSDK_DIR}/inference_engine/lib
    """
    if "CPU" in plugin.device:
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin "
                      " for specified device {}:\n {}".format(
                          plugin.device, ", ".join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path "
                      "in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
            log.error(
                "On CPU this is usually -l ${INTEL_CVSDK_DIR}/inference_engine/lib/centos_7.4/intel64/libcpu_extension_avx2.so"
            )
            log.error(
                "You may need to build the OpenVINO samples directory for this library to be created on your system."
            )
            log.error(
                "e.g. bash ${INTEL_CVSDK_DIR}/inference_engine/samples/build_samples.sh will trigger the library to be built."
            )
            log.error(
                "Replace 'centos_7.4' with the pathname on your computer e.g. ('ubuntu_16.04')"
            )
            sys.exit(1)

    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    """
    Ask OpenVINO for input and output tensor names and sizes
    """
    input_blob = next(iter(net.inputs))  # Name of the input layer
    out_blob = next(iter(net.outputs))  # Name of the output layer

    batch_size, n_channels, height, width = net.inputs[input_blob].shape
    batch_size, n_out_channels, height_out, width_out = net.outputs[
        out_blob].shape
    net.batch_size = batch_size

    # Load data
    input_data, label_data, img_indicies = load_data()

    # Loading model to the plugin
    exec_net = plugin.load(network=net)
    del net

    if args.stats:
        # Print the latency and throughput for inference
        print_stats(exec_net, input_data, n_channels, batch_size, input_blob,
                    out_blob, args)
    """
    OpenVINO inference code
    input_blob is the name (string) of the input tensor in the graph
    out_blob is the name (string) of the output tensor in the graph
    Essentially, this looks exactly like a feed_dict for TensorFlow inference
    """
    # Go through the sample validation dataset to plot predictions
    predictions = np.zeros(
        (img_indicies.shape[0], n_out_channels, height_out, width_out))

    for idx in range(0, img_indicies.shape[0], batch_size):

        res = exec_net.infer(
            inputs={
                input_blob: input_data[idx:(idx + batch_size), :n_channels]
            })

        # Save the predictions to array
        predictions[idx:(idx + batch_size), ] = res[out_blob]

    if idx != (len(img_indicies) - 1):  # Partial batch left in data
        log.info("Partial batch left over in dataset.")
    """
    Evaluate model with Dice metric
    """
    for idx in range(img_indicies.shape[0]):
        dice = dice_score(predictions[idx, 0, :, :], label_data[idx, 0, :, :])
        log.info("Image #{}: Dice score = {:.4f}".format(
            img_indicies[idx], dice))

    if args.plot:
        plot_predictions(predictions, input_data, label_data, img_indicies,
                         args)

    del exec_net
    del plugin
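
# dice_score() is assumed above; a common implementation for binary
# segmentation masks (the 0.5 threshold and the smoothing term are
# assumptions):
def dice_score(pred, truth, threshold=0.5, smooth=1e-6):
    # Soerensen-Dice coefficient: 2 * |intersection| / (|pred| + |truth|),
    # computed on thresholded masks; smooth guards the empty-mask case.
    pred = (pred > threshold).astype(np.float64)
    truth = (truth > threshold).astype(np.float64)
    intersection = np.sum(pred * truth)
    return (2.0 * intersection + smooth) / (pred.sum() + truth.sum() + smooth)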
Example No. 10
#!/usr/bin/env python3

from openvino.inference_engine import IENetwork, IEPlugin
import cv2
import os
import time

image_path = "/opt/intel/openvino/deployment_tools/demo/car.png"

model_dir = "/opt/intel/openvino/inference_engine/samples/python_samples/object_detection_demo_ssd_async/"
model_xml = model_dir + "VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.xml"
model_bin = model_dir + "VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.bin"

plugin_dir = "/opt/intel/openvino/inference_engine/lib/intel64"
cpu_extension = "/opt/intel/openvino/inference_engine/lib/intel64/libcpu_extension_sse4.so"
plugin = IEPlugin(device="CPU", plugin_dirs=plugin_dir)
plugin.add_cpu_extension(cpu_extension)
net = IENetwork(model=model_xml, weights=model_bin)
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
n, c, h, w = net.inputs[input_blob].shape
exec_net = plugin.load(network=net, num_requests=1)


start_time = time.time()
image_number = 200
for i in range(1, 1 + image_number):
    image = cv2.imread(image_path)
    image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))
    image = image.reshape((n, c, h, w))
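    # The listing appears truncated here; a plausible completion
    # (an assumption, not part of the original snippet):
    res = exec_net.infer(inputs={input_blob: image})

elapsed = time.time() - start_time
print("{} inferences in {:.2f} s ({:.1f} FPS)".format(
    image_number, elapsed, image_number / elapsed))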
Example No. 11
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    assert args.device.split(':')[0] == "HETERO", "This demo supports only Hetero Plugin. " \
                                                  "Please specify correct device, e.g. HETERO:FPGA,CPU"
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    net_ops = set([l.type for l in net.layers.values()])
    if not any([op == "Convolution" for op in net_ops]):
        log.warning("Specified IR doesn't contain any Convolution operations for which affinity going to be set.\n"
                    "Try to use another topology to make the affinity setting result more visible.")

    # Configure the plugin to initialize default affinity for network in set_initial_affinity() function.
    plugin.set_config({"TARGET_FALLBACK": args.device.split(':')[1]})
    # Enable graph visualization
    plugin.set_config({"HETERO_DUMP_GRAPH_DOT": "YES"})
    plugin.set_initial_affinity(net)

    for l in net.layers.values():
        if l.type == "Convolution":
            l.affinity = "GPU"

    assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    image = cv2.imread(args.input)
    image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    image = image.reshape((n, c, h, w))
    # Load network to the plugin
    exec_net = plugin.load(network=net)
    del net
    # Start sync inference
    res = exec_net.infer(inputs={input_blob: image})
    top_ind = np.argsort(res[out_blob], axis=1)[0, -args.number_top:][::-1]
    for i in top_ind:
        log.info("%f #%d" % (res[out_blob][0, i], i))
    del exec_net
    del plugin
    cwd = os.getcwd()
    log.info(
        "Graphs representing default and resulting affinities dumped to {} and {} files respectively"
        .format(os.path.join(cwd, 'hetero_affinity.dot'), os.path.join(cwd, 'hetero_subgraphs.dot'))
    )
def main():
  log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
  args = build_argparser().parse_args()
  model_xml = args.model
  model_bin = os.path.splitext(model_xml)[0] + ".bin"
  # Plugin initialization for specified device and load extensions library if specified
  log.info("Initializing plugin for {} device...".format(args.device))
  plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
  if args.cpu_extension and 'CPU' in args.device:
    log.info("Loading plugins for {} device...".format(args.device))
    plugin.add_cpu_extension(args.cpu_extension)

  # Read IR
  log.info("Reading IR...")
  net = IENetwork(model=model_xml, weights=model_bin)

  if plugin.device == "CPU":
    supported_layers = plugin.get_supported_layers(net)
    not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    if len(not_supported_layers) != 0:
      log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                format(plugin.device, ', '.join(not_supported_layers)))
      log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
      sys.exit(1)
  assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
  assert len(net.outputs) == 1, "Sample supports only single output topologies"
  input_blob = next(iter(net.inputs))
  out_blob = next(iter(net.outputs))
  log.info("Loading IR to the plugin...")
  exec_net = plugin.load(network=net, num_requests=2)
  # Read and pre-process input image
  if isinstance(net.inputs[input_blob], list):
    n, c, h, w = net.inputs[input_blob]
  else:
    n, c, h, w = net.inputs[input_blob].shape
  del net

  out_file_name = os.path.splitext(os.path.basename(args.input))[0]

  if args.output_dir:
    out_path = os.path.join(args.output_dir, out_file_name+'.mp4')

  if args.labels:
    with open(args.labels, 'r') as f:
      labels_map = [x.strip() for x in f]
  else:
    labels_map = None


  cur_request_id = 0
  next_request_id = 1
  
  job_id = os.environ['PBS_JOBID']
  inf_progress_file_path = os.path.join(args.output_dir,'i_progress_'+str(job_id)+'.txt')
  ren_progress_file_path = os.path.join(args.output_dir,'v_progress_'+str(job_id)+'.txt')

  is_async_mode = True
  fps_sum = 0
  frame_count = 0
  read_time = 0
  result_list = []
  try:
    # Set up the capture stream
    input_stream = args.input
    cap = cv2.VideoCapture(input_stream)
    if cap.isOpened():
      video_len = args.count
      log.info("Live stream running at {} fps".format(cv2.CAP_PROP_FPS))
      initial_w = cap.get(3)
      initial_h = cap.get(4)
      cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)

      log.info("Starting inference in async mode...")
      infer_time_start = time.time()
      while frame_count < video_len:
        ret, frame = cap.read()
        if not ret:
          break
  
        in_frame = cv2.resize(frame, (w, h))
        in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))
        # Main sync point:
        # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
        # in the regular mode we start the CURRENT request and immediately wait for its completion
        inf_start = time.time()
        if is_async_mode:
          exec_net.start_async(request_id=next_request_id, inputs={input_blob: in_frame})
        else:
          exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
  
        read_time_0 = time.time()
        if exec_net.requests[cur_request_id].wait(-1) == 0:
          inf_end = time.time()
          det_time = inf_end - inf_start
          #Parse detection results of the current request
          res = exec_net.requests[cur_request_id].outputs[out_blob]
          processBoxes(frame_count, res, labels_map, args.prob_threshold, frame, result_list, det_time)
        read_time += time.time() - read_time_0
    
        frame_count += 1
        # Write data to progress tracker
        if frame_count % 10 == 0:
          progressUpdate(inf_progress_file_path, time.time()-infer_time_start, frame_count, video_len)
  
        key = cv2.waitKey(1)
        if key == 27:
          break
        if key == 9:  # Tab toggles between sync and async mode
          is_async_mode = not is_async_mode
          log.info("Switched to {} mode".format("async" if is_async_mode else "sync"))
        if is_async_mode:
          cur_request_id, next_request_id = next_request_id, cur_request_id
  
      # End of while loop
      cap.release()
      log.info("{} seconds were spent reading".format(read_time))

    if args.output_dir is None:
      cv2.destroyAllWindows()
    else:
      total_time = time.time() - infer_time_start
      with open(os.path.join(args.output_dir, 'stats_'+str(job_id)+'.txt'), 'w') as f:
        f.write(str(round(total_time, 1))+'\n')
        f.write(str(frame_count)+'\n')

    o_video = os.path.join(args.output_dir, 'output_'+str(job_id)+'.mp4')
    post_process_t = time.time()
    postProcess(result_list, int(initial_w), int(initial_h), labels_map, o_video, is_async_mode, ren_progress_file_path)
    log.info("Post processing time: {0} sec" .format(time.time()-post_process_t))

  finally:
    del exec_net
    del plugin
Example No. 13
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(args.input)

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(
                args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    log.info("Batch size is {}".format(n))

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)

    # Start sync inference
    log.info("Starting inference ({} iterations)".format(args.number_iter))
    infer_time = []
    for i in range(args.number_iter):
        t0 = time()
        res = exec_net.infer(inputs={input_blob: images})
        infer_time.append((time() - t0) * 1000)
    log.info("Average running time of one iteration: {} ms".format(
        np.average(np.asarray(infer_time))))
    if args.perf_counts:
        perf_counts = exec_net.requests[0].get_perf_counts()
        log.info("Performance counters:")
        print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
            'name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
        for layer, stats in perf_counts.items():
            print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
                layer, stats['layer_type'], stats['exec_type'],
                stats['status'], stats['real_time']))
    # Processing output blob
    log.info("Processing output blob")
    res = res[out_blob]
    _, _, out_h, out_w = res.shape
    for batch, data in enumerate(res):
        classes_map = np.zeros(shape=(out_h, out_w, 3), dtype=np.uint8)
        for i in range(out_h):
            for j in range(out_w):
                if len(data[:, i, j]) == 1:
                    pixel_class = int(data[:, i, j])
                else:
                    pixel_class = np.argmax(data[:, i, j])
                classes_map[i, j, :] = classes_color_map[min(pixel_class, 20)]
        out_img = os.path.join(os.path.dirname(__file__),
                               "out_{}.bmp".format(batch))
        cv2.imwrite(out_img, classes_map)
        log.info("Result image was saved to {}".format(out_img))
import sys
import os

# Note: setupvars.sh is a shell script and must be sourced in the shell
# before launching Python; exec() on its contents would fail here.
# exec(open("/opt/intel/openvino/bin/setupvars.sh").read())
from openvino.inference_engine import IENetwork, IEPlugin
import numpy as np
from skimage.transform import resize
from skimage.io import imread, imsave

# load the intel optimized model for inference
model_xml = sys.argv[1]
model_bin = sys.argv[2]
plugin = IEPlugin("CPU", plugin_dirs=None)

# Build Inference Engine network using xml and bin files
net = IENetwork(model=model_xml, weights=model_bin)
# Establish input and output blobs (images)
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
exec_net = plugin.load(network=net)
del net

#preprocess image
#    resize to 512,512
#    normalize it to 0-1
#    transpose and reshape image channel format as openvino IE engine requires it as n,c,h,w

fileName = '/workspace/Usecases_Code/Image_Segmentation/training_512/0cdf5b5d0ce1_01.jpg'
img = imread(fileName)
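
# Implementing the preprocessing described in the comments above (the
# 512x512 size and 0-1 scaling come from those comments; batch size 1 and
# 3 channels are assumptions):
img = resize(img, (512, 512), preserve_range=True)  # resize to 512x512
img = img / 255.0                                   # normalize to 0-1
img = img.transpose((2, 0, 1))                      # HWC -> CHW
img = img.reshape((1, 3, 512, 512))                 # add batch axis: n,c,h,w
res = exec_net.infer(inputs={input_blob: img})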
def main():
  log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
  args = build_argparser().parse_args()
  model_xml = args.model
  model_bin = os.path.splitext(model_xml)[0] + ".bin"
  # Plugin initialization for specified device and load extensions library if specified
  log.info("Initializing plugin for {} device...".format(args.device))
  plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
  if args.cpu_extension and 'CPU' in args.device:
    plugin.add_cpu_extension(args.cpu_extension)

  # Read IR
  log.info("Reading IR...")
  net = IENetwork.from_ir(model=model_xml, weights=model_bin)

  if "CPU" in plugin.device:
    supported_layers = plugin.get_supported_layers(net)
    not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    if len(not_supported_layers) != 0:
      log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                format(plugin.device, ', '.join(not_supported_layers)))
      log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
      sys.exit(1)
  assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
  assert len(net.outputs) == 1, "Sample supports only single output topologies"
  input_blob = next(iter(net.inputs))
  out_blob = next(iter(net.outputs))
  log.info("Loading IR to the plugin...")
  exec_net = plugin.load(network=net, num_requests=2)
  # Read and pre-process input image
  n, c, h, w = net.inputs[input_blob].shape
  del net

  predictions = []
  data = Input(args.input_type, args.input)
  cur_request_id = 0

  fps = 25
  out_width = 640
  out_height = 480
  if args.dump_output_video:
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(args.path_to_output_video, fourcc, fps, (int(out_width), int(out_height)))

  while not data.is_finished():
    frame, img_id = data.get_next_item()
    initial_h, initial_w, channels = frame.shape
    in_frame = cv2.resize(frame, (w, h))
    in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    in_frame = in_frame.reshape((n, c, h, w))

    exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
    if exec_net.requests[cur_request_id].wait(-1) == 0:

      # Parse detection results of the current request
      res = exec_net.requests[cur_request_id].outputs[out_blob]
      coco_detections = []
      for obj in res[0][0]:
        # Draw only objects when probability more than specified threshold
        if obj[2] > args.prob_threshold:
          x1 = float(obj[3] * initial_w)
          y1 = float(obj[4] * initial_h)
          x2 = float(obj[5] * initial_w)
          y2 = float(obj[6] * initial_h)

          x_, y_ = round(x1, 1), round(y1, 1)
          w_ = round(x2 - x1, 1)
          h_ = round(y2 - y1, 1)
          class_id = int(obj[1])

          coco_det = {}
          coco_det['image_id'] = img_id
          coco_det['category_id'] = class_id
          coco_det['bbox'] = [x_, y_, w_, h_]
          coco_det['score'] = round(float(obj[2]), 1)
          coco_detections.append(coco_det)

          # Draw box and label/class_id
          cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)
          cv2.putText(frame, str(class_id) + ' ' + str(round(obj[2] * 100, 1)) + ' %', (int(x1), int(y1) - 7),
                      cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
      predictions.extend(coco_detections)

    if args.dump_output_video:
      img_resized = cv2.resize(frame, (out_width, out_height))
      out.write(img_resized)
    if args.show:
      cv2.imshow("Detection Results", frame)
      key = cv2.waitKey(10)
      if key == 27:
        break

  if args.dump_predictions_to_json:
    with open(args.output_json_path, 'w') as output_file:
      json.dump(predictions, output_file, sort_keys=True, indent=4)

  cv2.destroyAllWindows()
  del exec_net
  del plugin
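
# The predictions list above follows the COCO detection-results format
# (image_id / category_id / bbox / score), so the dumped JSON can be scored
# with pycocotools. A hedged sketch with hypothetical file paths:
#
#   from pycocotools.coco import COCO
#   from pycocotools.cocoeval import COCOeval
#
#   coco_gt = COCO('instances_val.json')           # hypothetical ground truth
#   coco_dt = coco_gt.loadRes('predictions.json')  # file written above
#   coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
#   coco_eval.evaluate()
#   coco_eval.accumulate()
#   coco_eval.summarize()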
    def load_model(self,
                   model,
                   device,
                   input_size,
                   output_size,
                   num_requests,
                   cpu_extension=None,
                   plugin=None):
        """
         Loads a network and an image to the Inference Engine plugin.
        :param model: .xml file of pre trained model
        :param cpu_extension: extension for the CPU device
        :param device: Target device
        :param input_size: Number of input layers
        :param output_size: Number of output layers
        :param num_requests: Index of Infer request value. Limited to device capabilities.
        :param plugin: Plugin for specified device
        :return:  Shape of input layer
        """

        model_xml = model
        model_bin = os.path.splitext(model_xml)[0] + ".bin"
        # Plugin initialization for specified device
        # and load extensions library if specified
        if not plugin:
            log.info("Initializing plugin for {} device...".format(device))
            self.plugin = IEPlugin(device=device)
        else:
            self.plugin = plugin

        if cpu_extension and 'CPU' in device:
            self.plugin.add_cpu_extension(cpu_extension)

        # Read IR
        log.info("Reading IR...")
        self.core = IECore()
        self.net = IENetwork(model=model_xml, weights=model_bin)
        #supported_layers = self.core.query_network(self.net, device)
        log.info("Loading IR to the plugin...")

        #self.net_plugin2 = self.core.load_network(self.net, device)

        # if self.plugin.device == "CPU":
        #     supported_layers = self.plugin.get_supported_layers(self.net)
        #     not_supported_layers = \
        #         [l for l in self.net.layers.keys() if l not in supported_layers]
        #     if len(not_supported_layers) != 0:
        #         log.error("Following layers are not supported by "
        #                   "the plugin for specified device {}:\n {}".
        #                   format(self.plugin.device,
        #                          ', '.join(not_supported_layers)))
        #         log.error("Please try to specify cpu extensions library path"
        #                   " in command line parameters using -l "
        #                   "or --cpu_extension command line argument")
        #         sys.exit(1)

        if num_requests == 0:
            # Loads network read from IR to the plugin
            self.net_plugin = self.plugin.load(network=self.net)
        else:
            self.net_plugin = self.plugin.load(network=self.net,
                                               num_requests=num_requests)

        self.input_blob = next(iter(self.net.inputs))
        self.out_blob = next(iter(self.net.outputs))

        assert len(self.net.inputs.keys()) == input_size, \
            "Supports only {} input topologies".format(input_size)
        assert len(self.net.outputs) == output_size, \
            "Supports only {} output topologies".format(output_size)

        return self.plugin, self.get_input_shape()
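
# A hedged usage sketch for load_model() above (the wrapper class name is
# hypothetical):
#
#   net = Network()
#   plugin, input_shape = net.load_model('model.xml', 'CPU',
#                                        input_size=1, output_size=1,
#                                        num_requests=2,
#                                        cpu_extension='libcpu_extension.so')
#   n, c, h, w = input_shape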
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml_xml = args.model
    model_xml_bin = os.path.splitext(model_xml_xml)[0] + ".bin"
    model_va_xml = args.model_va
    model_va_bin = os.path.splitext(model_va_xml)[0] + ".bin"
    model_lpr_xml = args.model_lpr
    model_lpr_bin = os.path.splitext(model_lpr_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    # When running on CPU, add the CPU extensions library if one was supplied
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    log.info("Reading IR...")
    net_recognition = IENetwork(model=model_xml_xml, weights=model_xml_bin)
    # When running on CPU, verify that all network layers are supported
    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net_recognition)
        not_supported_layers = [
            l for l in net_recognition.layers.keys()
            if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in demo's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(net_recognition.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net_recognition.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net_recognition.inputs))
    out_blob = next(iter(net_recognition.outputs))
    log.info("Loading IR to the plugin...")
    exec_net_recognition = plugin.load(network=net_recognition, num_requests=2)
    # Read and pre-process input image
    n, c, h, w = net_recognition.inputs[input_blob].shape
    print("	n, c, h, w ", n, c, h, w)
    del net_recognition
    if args.input == 'cam':
        input_stream = 0
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None
    cap = cv2.VideoCapture(input_stream)
    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the demo execution press Esc button")
    is_async_mode = False
    render_time = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        next_frame = frame
        initial_h = frame.shape[0]
        initial_w = frame.shape[1]
        # Main sync point:
        # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
        # in the regular mode we start the CURRENT request and immediately wait for its completion
        inf_start = time.time()
        if is_async_mode:
            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net_recognition.start_async(request_id=next_request_id,
                                             inputs={input_blob: in_frame})
        else:
            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net_recognition.start_async(request_id=cur_request_id,
                                             inputs={input_blob: in_frame})
        if exec_net_recognition.requests[cur_request_id].wait(-1) == 0:
            inf_end = time.time()
            det_time = inf_end - inf_start
            # Parse detection results of the current request
            res = exec_net_recognition.requests[cur_request_id].outputs[
                out_blob]

            for obj in res[0][0]:
                # Draw only objects when probability more than specified threshold
                if obj[2] > args.prob_threshold:
                    if obj[2] == 1:  # this demo treats detections reported with confidence exactly 1.0 as vehicles
                        xmin = int(obj[3] * initial_w)
                        ymin = int(obj[4] * initial_h)
                        xmax = int(obj[5] * initial_w)
                        ymax = int(obj[6] * initial_h)
                        class_id_vehicle = int(obj[1])
                        # Draw box and label/class_id
                        color = (min(class_id_vehicle * 12.5,
                                     10), min(class_id_vehicle * 7,
                                              10), min(class_id_vehicle * 5,
                                                       10))
                        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                      (0, 255, 255), 2)
                        crop_vehicle = frame[ymin:ymax, xmin:xmax]
                        crop_img = crop_vehicle
                        # Note: re-reading and re-loading the attributes
                        # network for every detection is costly; loading it
                        # once before the loop would be preferable.
                        net_va_recognition = IENetwork(model=model_va_xml,
                                                       weights=model_va_bin)
                        input_blob_va = next(iter(net_va_recognition.inputs))
                        out_blob_va = next(iter(net_va_recognition.outputs))
                        log.info("Loading IR to the plugin...")
                        exec_net_va_recog = plugin.load(
                            network=net_va_recognition, num_requests=2)
                        n_va, c_va, h_va, w_va = net_va_recognition.inputs[
                            input_blob_va].shape
                        if not is_async_mode:
                            if crop_img.any():
                                in_frame_va = cv2.resize(
                                    crop_img, (w_va, h_va))
                            else:
                                in_frame_va = cv2.resize(frame, (w_va, h_va))
                            in_frame_va = in_frame_va.transpose(
                                (2, 0,
                                 1))  # Change data layout from HWC to CHW
                            in_frame_va = in_frame_va.reshape(
                                (n_va, c_va, h_va, w_va))
                            res_va = exec_net_va_recog.infer(
                                inputs={input_blob_va: in_frame_va})

                        # Processing output blob
                        if exec_net_recognition.requests[cur_request_id].wait(
                                -1) == 0:
                            lis_res_color = []
                            log.info("Processing output blob")
                            res_colord = res_va["color"]
                            for l_i in np.nditer(res_colord):
                                val1 = int(l_i * 100)
                                lis_res_color.append(val1)
                            labels_map_list_color = [
                                "white", "gray", "yellow", "red", "green",
                                "blue", "black"
                            ]
                            max_color_value, max_color_index = maximum(
                                lis_res_color, len(lis_res_color))
                            class_id_va = max_color_index
                            det_label_va = labels_map_list_color[class_id_va]
                            lis_res_type = []
                            res_type = res_va["type"]
                            for type_i in np.nditer(res_type):
                                val1_type_i = int(type_i * 100)
                                lis_res_type.append(val1_type_i)
                            labels_map_list_type = [
                                "car", "van", "truck", "bus"
                            ]
                            max_type_value, max_type_index = maximum(
                                lis_res_type, len(lis_res_type))
                            class_id_type = max_type_index
                            det_label_type = labels_map_list_type[
                                class_id_type]
                        cv2.putText(frame,
                                    det_label_va + ' ' + det_label_type + ' ',
                                    (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, (10, 10, 200), 2)

                        del exec_net_va_recog

                    if obj[2] != 1:  # all other detections are treated as license plates
                        xmin = int(obj[3] * initial_w)
                        ymin = int(obj[4] * initial_h)
                        xmax = int(obj[5] * initial_w)
                        ymax = int(obj[6] * initial_h)
                        class_id = int(obj[1])
                        # Draw box and label/class_id
                        color = (min(class_id * 12.5,
                                     10), min(class_id * 7,
                                              10), min(class_id * 5, 10))
                        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                      (0, 255, 255), 2)
                        crop_plate = frame[ymin:ymax, xmin:xmax]
                        crop_img_lpr = crop_plate
                        net_lpr_recognition = IENetwork(model=model_lpr_xml,
                                                        weights=model_lpr_bin)
                        input_blob_lpr = list(
                            net_lpr_recognition.inputs.keys())[0]
                        seq_id = list(net_lpr_recognition.inputs.keys())[1]
                        out_blob_lpr = next(iter(net_lpr_recognition.outputs))
                        log.info("Loading IR to the plugin...")
                        exec_net_lpr_recog = plugin.load(
                            network=net_lpr_recognition, num_requests=2)
                        n_lpr, c_lpr, h_lpr, w_lpr = net_lpr_recognition.inputs[
                            input_blob_lpr].shape
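                        # The LPR model expects a second "sequence" input; the
                        # (88, 1) shape with a leading 0 follows the
                        # license-plate-recognition-barrier demo convention
                        # (assumption; exact semantics are model-specific).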
                        in_frame_seq_id = np.ones(88, dtype=np.int32)
                        in_frame_seq_id = in_frame_seq_id.reshape((88, 1))
                        in_frame_seq_id[0] = 0
                        if not is_async_mode:
                            if crop_img_lpr.any():
                                in_frame_lpr = cv2.resize(
                                    crop_img_lpr, (w_lpr, h_lpr))
                            else:
                                in_frame_lpr = cv2.resize(
                                    frame, (w_lpr, h_lpr))
                            in_frame_lpr = in_frame_lpr.transpose(
                                (2, 0,
                                 1))  # Change data layout from HWC to CHW
                            in_frame_lpr = in_frame_lpr.reshape(
                                (n_lpr, c_lpr, h_lpr, w_lpr))
                            res_lpr = exec_net_lpr_recog.infer(
                                inputs={
                                    input_blob_lpr: in_frame_lpr,
                                    seq_id: in_frame_seq_id
                                })
                            lis_res_lpr = []
                            res_lpr_decode = res_lpr['decode']
                            for type_i in np.nditer(res_lpr_decode):
                                val1_lpr_i = int(type_i)
                                lis_res_lpr.append(val1_lpr_i)
                        class_id_lpr = lis_res_lpr[0]
                        det_label_lpr = list_lpr[class_id_lpr]
                        list_plate = []
                        for item_list in lis_res_lpr:
                            if item_list == -1:
                                break
                            else:
                                list_plate.append(list_lpr[item_list])
                        print("Number_plate", list_plate)
                        if len(list_plate) == 6:
                            list_plate.remove(list_plate[0])
                            str_lpr = ''.join(list_plate)
                            cv2.putText(frame, det_label_lpr + ' ' + str_lpr,
                                        (xmin, ymin - 7),
                                        cv2.FONT_HERSHEY_COMPLEX, 0.8,
                                        (10, 10, 200), 2)
                        del exec_net_lpr_recog

            # Draw performance stats
            inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
   "Inference time: {:.3f} ms".format(det_time * 1000)
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                render_time * 1000)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
   "Async mode is off. Processing request {}".format(cur_request_id)

            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message, (10, int(initial_h - 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

        render_start = time.time()
        cv2.imshow("Detection Results", frame)
        render_end = time.time()
        render_time = render_end - render_start

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame

        key = cv2.waitKey(500)
        if key == 27:
            break
        if key == 9:  # Tab
            is_async_mode = not is_async_mode
            log.info("Switched to {} mode".format(
                "async" if is_async_mode else "sync"))

    cv2.destroyAllWindows()
    del exec_net_recognition
    del plugin
Exemplo n.º 18
    def __init__(self, dyda_config_path='', debug=False):
        """ __init__ of DetectorOpenVINO

        Trainer Variables:
            input_data: a list of image array
            results: defined by lab_tools.output_pred_detection()

        Arguments:
            dyda_config_path -- Trainer config filepath
        """
        if debug:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)

        # Setup dyda config
        super(DetectorOpenVINO,
              self).__init__(dyda_config_path=dyda_config_path)
        self.set_param(self.class_name)

        self.check_param_keys()

        if "threshold" in self.param.keys():
            self.threshold = self.param["threshold"]
        else:
            self.threshold = 0.3

        # Setup DL model
        model_xml = self.param['model_description']
        model_bin = self.param['model_file']
        with open(self.param['label_file'], 'r') as f:
            self.labels_map = [x.strip() for x in f]

        # Setup OpenVINO
        #
        # Plugin initialization for specified device and
        # load extensions library if specified
        #
        # Note: MKLDNN CPU-targeted custom layer support is not included
        #       because we do not use it yet.
        self.plugin = IEPlugin(device=self.param['device'],
                               plugin_dirs=self.param['plugin_dirs'])
        if self.param['device'] == 'CPU':
            for ext in self.param['cpu_extensions']:
                logger.info('Add cpu extension: {}'.format(ext))
                self.plugin.add_cpu_extension(ext)
        logger.debug("Computation device: {}".format(self.param['device']))

        # Read IR
        logger.debug("Loading network files:\n\t{}\n\t{}".format(
            model_xml, model_bin))
        net = IENetwork(model=model_xml, weights=model_bin)

        if self.plugin.device == "CPU":
            supported_layers = self.plugin.get_supported_layers(net)
            not_supported_layers = [
                l for l in net.layers.keys() if l not in supported_layers
            ]
            if len(not_supported_layers) != 0:
                logger.error(
                    ('Following layers are not supported '
                     'by the plugin for specified device {}:\n {}').format(
                         self.plugin.device, ', '.join(not_supported_layers)))
                logger.error("Please try to specify cpu "
                             "extensions library path in demo's "
                             "command line parameters using -l "
                             "or --cpu_extension command line argument")
                sys.exit(1)

        assert len(net.inputs.keys()) == 1, (
            'Demo supports only single input topologies')
        assert len(net.outputs) == 1, (
            'Demo supports only single output topologies')

        # input_blob and out_blob are the layer names in string format.
        logger.debug("Preparing input blobs")
        self.input_blob = next(iter(net.inputs))
        self.out_blob = next(iter(net.outputs))

        self.n, self.c, self.h, self.w = net.inputs[self.input_blob].shape

        # Loading model to the plugin
        self.exec_net = self.plugin.load(network=net, num_requests=2)

        del net

        # Initialize engine mode: sync or async
        #
        # FIXME: async mode does not work currently.
        #        process_input needs to provide two input tensors for async.
        self.is_async_mode = False
        self.cur_request_id = 0
        self.next_request_id = 1
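        # The double-buffering pattern the FIXME refers to looks roughly like
        # this (a sketch, not part of this class):
        #
        #   exec_net.start_async(request_id=next_id, inputs={input_blob: next_tensor})
        #   if exec_net.requests[cur_id].wait(-1) == 0:
        #       res = exec_net.requests[cur_id].outputs[out_blob]
        #   cur_id, next_id = next_id, cur_id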
class emotionInfer():
    def __init__(self):
        self.model_xml_path = "1.xml"
        self.model_bin_path = "1.bin"
        self.model_landmark_path = "./shape_predictor_68_face_landmarks.dat"
        self.model_facedet_path = "./haarcascade_frontalface_alt2.xml"
        self.capPic = "capture.jpg"
        self.result = {}
        self.maxLen = 5
        self.averData = [0] * 7
        self.averDataQue = deque()
        self.plugin = IEPlugin(device='MYRIAD')
        self.net = IENetwork.from_ir(model=self.model_xml_path,
                                     weights=self.model_bin_path)
        self.exec_net = self.plugin.load(network=self.net)
        self.dataQueue = deque(maxlen=self.maxLen)
        assert len(self.net.inputs.keys()) == 1
        assert len(self.net.outputs) == 1
        self.input_blob = next(iter(self.net.inputs))  # input_blob = 'input'
        self.out_blob = next(iter(self.net.outputs))   # out_blob = 'output/BiasAdd'

    def capturePic(self):
        # cameraCapture is assumed to be a cv2.VideoCapture opened elsewhere
        success, frame = cameraCapture.read()
        img_path = self.capPic
        cv2.imwrite(img_path, frame)
        return img_path

    def infer_init(self):
        # Note: these locals mirror __init__ but are never stored on self,
        # so this method has no lasting effect as written.
        plugin = IEPlugin(device='MYRIAD')
        net = IENetwork.from_ir(model=self.model_xml_path,
                                weights=self.model_bin_path)
        exec_net = plugin.load(network=net)
        assert len(net.inputs.keys()) == 1
        assert len(net.outputs) == 1
        input_blob = next(iter(net.inputs))  # input_blob = 'input'
        out_blob = next(iter(net.outputs))   # out_blob = 'output/BiasAdd'

    def pre_process_image(self, img_path):
        # Model input format
        n, c, h, w = [1, 3, 224, 224]
        image = Image.open(img_path)
        processedImg = image.resize((w, h), resample=Image.BILINEAR)
        # Normalize to keep data between 0 and 1
        processedImg = np.array(processedImg) / 255.0
        # Change data layout from HWC to CHW and add the batch dimension
        processedImg = processedImg.transpose((2, 0, 1))
        processedImg = processedImg.reshape((n, c, h, w))
        return image, processedImg, img_path

    def emotion_infer(self, cap):
        image, processedImg, imagePath = self.pre_process_image(cap)
        infer_result = self.exec_net.infer(
            inputs={self.input_blob: processedImg})
        emotion_array = infer_result["dense_2/Softmax"][0]
        # request_data_json = {"vehicle_id": random.randint(0, 100), "latitude": "12.34",
        #                      "longitude": "12.34", "tired": "12.34", "angry": emotion_array[0],
        #                      "disgusted": emotion_array[1], "afraid": emotion_array[2],
        #                      "happy": emotion_array[3], "sad": emotion_array[4],
        #                      "surprised": emotion_array[5], "neutral": emotion_array[6]}
        # self.result = request_data_json
        self.dataQueue.append(emotion_array)
        # Average the last maxLen predictions once the window is full
        self.averData = [0] * 7
        if len(self.dataQueue) == self.maxLen:
            for data in self.dataQueue:
                for num in range(7):
                    self.averData[num] += data[num]
            self.averData = [i / self.maxLen for i in self.averData]
        return self.averData
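
# A hedged usage sketch for emotionInfer above (it assumes a global
# cv2.VideoCapture named cameraCapture, as the class itself does):
#
#   import cv2
#   cameraCapture = cv2.VideoCapture(0)
#   infer = emotionInfer()
#   while True:
#       averaged = infer.emotion_infer(infer.capturePic())
#       print(averaged)  # seven averaged emotion scores once the window fills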
Exemplo n.º 20
def main():
    # Setup log config
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)

    # Parse args
    args = parse_args()
    log.info("Start Fall Detection")

    # Plugin initialization for specified device
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    # Load model
    model_xml = os.path.join(os.getcwd(), "models",
                             "human-pose-estimation-0001",
                             args.model_precision,
                             "human-pose-estimation-0001.xml")
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error("""Following layers are not supported by the plugin
                for specified device {}:\n {}""".format(
                plugin.device, ', '.join(not_supported_layers)))
            log.error("""Please try to specify cpu extensions library
                    path in demo's command line parameters using -l
                    or --cpu_extension command line argument
                """)
            sys.exit(1)
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)

    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    del net
    if args.input == 'cam':
        input_stream = 0
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"

    cap = cv2.VideoCapture(input_stream)
    # Grab the shape of the input
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    font_scale = round(height / 360)
    font_thickness = round(3 * font_scale)

    cur_request_id = 0
    next_request_id = 1

    # Fall Detection variables
    previous_head_avg_position = 0
    previous_head_detection_frame = 0
    last_fall_detected_frame = 0
    # The fall detection threshold speed is dependent on the frame height
    fall_threshold = 0.04 * height
    framerate_threshold = round(fps / 5.0)
    fall_detected_text_position = (20, round(0.15 * height))
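    # Worked example: for a 720-pixel-high, 30 fps stream, fall_threshold is
    # 0.04 * 720 = 28.8 pixels and framerate_threshold is round(30 / 5) = 6,
    # i.e. a head drop of roughly 29 px within 6 frames is treated as a fall.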

    render_time = 0
    ret, frame = cap.read()
    frame_number = 0

    out_file = None
    if args.input != 'cam':
        out_filename = os.path.splitext(input_stream)[0] + '_output.mp4'
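        # 0x00000021 is a numeric FOURCC commonly used with OpenCV's FFMPEG
        # backend to get an H.264 stream in an .mp4 container (assumption
        # based on common usage; behaviour depends on the OpenCV build).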
        out_file = cv2.VideoWriter(out_filename, 0x00000021, fps,
                                   (width, height))
        log.info("Evaluating video file stream...")
    else:
        log.info("Evaluating webcam stream...")

    while cap.isOpened():
        ret, next_frame = cap.read()
        if not ret:
            break

        # Pre-process inputs
        in_frame = cv2.resize(next_frame, (w, h))
        in_frame = in_frame.transpose((2, 0, 1))
        in_frame = in_frame.reshape((n, c, h, w))

        # Inference
        exec_net.start_async(request_id=next_request_id,
                             inputs={input_blob: in_frame})
        if exec_net.requests[cur_request_id].wait(-1) == 0:
            # Parse detection results of the current request
            res = exec_net.requests[cur_request_id].outputs
            kp_heatmaps = res['Mconv7_stage2_L2']

            threshold = 0.5
            points = []
            head_elements_y_pos = []

            for i in range(POSE_POINTS_NUMBER):
                # Confidence map of the corresponding body part.
                probMap = kp_heatmaps[0, i, :, :]

                # Find global maxima of the probMap.
                minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)

                # Scale the point to fit on the original image
                x = frame.shape[1] / probMap.shape[1] * point[0]
                y = frame.shape[0] / probMap.shape[0] * point[1]

                # Add point if the probability is greater than the threshold
                if prob > threshold:
                    point = (int(x), int(y))

                    # If the point is a component of the head (including the
                    # neck and shoulders), append it to the head elements
                    if i in (0, 1, 2, 5, 14, 15, 16, 17):
                        head_elements_y_pos.append(point[1])

                    points.append(point)
                else:
                    points.append(None)

            # Draw Skeleton
            for num, pair in enumerate(POSE_PAIRS):
                partA = pair[0]
                partB = pair[1]
                if points[partA] and points[partB]:
                    cv2.line(frame, points[partA], points[partB],
                             JOINT_COLORS[num], 3)

            # Calculate head average position from its components
            if (len(head_elements_y_pos) > 0):
                head_avg_position = sum(head_elements_y_pos)
                head_avg_position /= len(head_elements_y_pos)
                # log.info(head_avg_position)

                # Compare previous head position
                # to detect if falling
                if (previous_head_detection_frame
                        and (head_avg_position - previous_head_avg_position) >
                        fall_threshold
                        and (frame_number - previous_head_detection_frame) <
                        framerate_threshold):
                    # print("Fall detected.")
                    last_fall_detected_frame = frame_number

                previous_head_avg_position = head_avg_position
                previous_head_detection_frame = frame_number

            # Draw the fall detection text if the last fall event
            # occurred at most 2 seconds ago
            if (last_fall_detected_frame
                    and (frame_number - last_fall_detected_frame) <= 2 * fps):
                cv2.putText(frame, "FALL DETECTED!",
                            fall_detected_text_position,
                            cv2.FONT_HERSHEY_COMPLEX, font_scale, (0, 0, 255),
                            font_thickness, cv2.LINE_AA)

        render_start = time.time()
        # Write to the output file when reading from a video file;
        # otherwise show the live feed
        if out_file:
            out_file.write(frame)
        else:
            cv2.imshow("Detection Results", frame)

        cur_request_id, next_request_id = next_request_id, cur_request_id
        frame = next_frame

        # Increment frame number
        frame_number += 1

        key = cv2.waitKey(1)
        if key == 27:
            break

    if out_file:
        # Release the output writer
        out_file.release()
        log.info("Finished. %s saved." % (out_filename))
    else:
        log.info("Finished.")
    cap.release()
    cv2.destroyAllWindows()
    del exec_net
Exemplo n.º 21
import argparse
import os

from openvino.inference_engine import IENetwork, IEPlugin

from test import preprocessing, postprocessing

args = argparse.ArgumentParser()
args.add_argument("images", nargs='*', type=str)
args.add_argument("-d", "--device"   , type=str, default="MYRIAD", help="Default MYRIAD or CPU")
args = args.parse_args()

data_type="FP16"
if args.device == "CPU": data_type="FP32"

model_xml='./'+data_type+'/y.xml'
model_bin='./'+data_type+'/y.bin'
plugin = IEPlugin(device=args.device, plugin_dirs=None)
extension = "/inference_engine_samples/intel64/Release/lib/libcpu_extension.so"
extension = os.environ['HOME']+extension
if args.device == "CPU":plugin.add_cpu_extension(extension)
net = IENetwork(model=model_xml, weights=model_bin)	# R5

print(model_bin, "on", args.device)
exec_net = plugin.load(network=net, num_requests=1)

input_blob = next(iter(net.inputs))  #input_blob = 'data'
out_blob   = next(iter(net.outputs)) #out_blob   = 'detection_out'
print(net.inputs[input_blob].shape)
model_n, model_c, model_h, model_w = net.inputs[input_blob].shape #Tool kit R4
print("input_blob shape(from xml)", net.inputs[input_blob].shape)
print("name (input_blob : out_blob) =",input_blob,":",out_blob)
Exemplo n.º 22
from openvino.inference_engine import IENetwork, IEPlugin
import cv2
import os
import numpy as np

# Note: this fragment assumes an argparse `args` object (model, input,
# plugin_dirs, number_top) defined in code omitted from the snippet.

plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dirs)

model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + '.bin'

net = IENetwork(model=model_xml, weights=model_bin)

exec_net = plugin.load(network=net)

input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))

# Read and preprocess the images
n, c, h, w = net.inputs[input_blob].shape
images = np.ndarray(shape=(n, c, h, w))
for i in range(n):
    image = cv2.imread(args.input[i])
    if image.shape[:-1] != (h, w):
        image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))
    images[i] = image

res = exec_net.infer(inputs={input_blob: images})

res = res[out_blob]
for i, probs in enumerate(res):
    probs = np.squeeze(probs)
    top_ind = np.argsort(probs)[-args.number_top:][::-1]
    print(f'Input {args.input[i]}\n')
    for id in top_ind:
        print(f'{probs[id]:.7f} label #{id}')  # loop body restored; the original snippet was truncated here
Exemplo n.º 23
def Inference():

    try:

        #Get the Model XML file from the XML entry of the GUI
        model_xml = xml_entry.get()
        if (not model_xml):
            msg = messagebox.showinfo(
                "Open Vino Inference",
                "Please browse and select the model XML file")
            return

        #Set the model BIN file path from the XML path; this assumes the XML and BIN files share the same base name
        model_bin = os.path.splitext(model_xml)[0] + ".bin"

        #Get and set the label file path
        labels_filepath = label_entry.get()

        if (not labels_filepath):
            msg = messagebox.showinfo(
                "Inference", "Please browse and select the labels file")
            return
        #Get the device option from one of the three options(CPU, GPU or Movidius)
        device_option = strDevice.get()

        if (device_option == "Movidius"):
            device_option = "MYRIAD"

        #Get the feed option here as the input. This is from one of the three options(Image, Video or Movidius)
        feed_option = strCapture.get()
        fileext = os.path.splitext(image_entry.get())

        #Validate the user input for the selected image; if it is empty, prompt to select the image again
        if ((feed_option == "Image") and (not image_entry.get())):
            msg = messagebox.showinfo(
                "Inference", "Please browse and select the Image files")
            return
        #Validate the user input for the selected video; if it is empty, prompt to select the video file again
        if ((feed_option == "Video") and (not image_entry.get())):
            msg = messagebox.showinfo(
                "Inference", "Please browse and select the Video files")
            return

        #Validate that the selected image is a JPEG image
        if (feed_option == "Image" and
            (fileext[1].lower() != ".jpg" and fileext[1].lower() != ".jpeg")):
            msg = messagebox.showinfo(
                "Inference",
                "Please select valid Image files. Jpeg files are supported")
            return
        #Validate that the selected video is in .MP4, .MPEG or .AVI format
        if (feed_option == "Video" and
            (fileext[1].lower() != ".mp4" and fileext[1].lower() != ".avi"
             and fileext[1].lower() != ".mpeg")):
            msg = messagebox.showinfo(
                "Inference",
                "Please enter valid Video files. MP4,AVI or MPEG files are supported"
            )
            return

        image_list = []
        #if the Option to Scan the full folder is selected
        if (scanFolder.get()):
            #For the video option only the selected video is appended; multiple videos in the same folder are not supported
            if (feed_option == "Video"):
                image_list.append(image_entry.get())
            #For images with Scan folder selected, browse all JPEG files in the folder and append them to the inference list
            elif (feed_option == "Image"):
                full_folder = os.path.dirname(image_entry.get())
                for file_name in os.listdir(full_folder):
                    if file_name.endswith(".jpg") or file_name.endswith(
                            ".jpeg"):
                        image_list.append(os.path.join(full_folder, file_name))
                    else:
                        continue
            else:
                pass
        else:
            image_list.append(image_entry.get())
        # Plugin initialization for specified device
        plugin = IEPlugin(device=device_option)
        # Read IR
        net = IENetwork(model=model_xml, weights=model_bin)

        input_blob, out_blob = next(iter(net.inputs)), next(iter(net.outputs))
        net.batch_size = 1

        ################################################################################
        #Loading the labels file
        with open(labels_filepath, 'r') as f:
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]

        # Read and pre-process input image
        n, c, h, w = net.inputs[input_blob].shape
        exec_net = plugin.load(network=net)

        #if option selected is image
        if (feed_option == "Image"):
            #Scan through the image list and run inference on each image
            for i in range(len(image_list)):
                tm = time.time()
                cap = cv2.imread(image_list[i])
                frame_ = cap
                frame_ = preprocess_input(frame_)
                res = exec_net.infer(inputs={
                    input_blob:
                    [cv2.resize(frame_, (w, h)).transpose((2, 0, 1))]
                })
                res = res[out_blob]
                clslbl = []
                #Collect the top-10 predictions
                for i, probs in enumerate(res):
                    top_ind = np.argsort(np.squeeze(probs))[-10:][::-1]
                    for id in top_ind:
                        clslbl.append("{} ({:.2f}%)".format(
                            labels_map[id], 100 * probs[id]))
                frame_ = cv2.resize(frame_, (800, 460))
                #Showing the Image, threshold and the Inferences per second details on the frame
                txt = ('[%02d INF/S] Prediction: ' %
                       (1 / (time.time() - tm))) + clslbl[0]
                txtlabel.config(text=txt)

                #When scanning the complete folder, show an image slide show with the inference results and confidence details
                #The image advances every 2 seconds; press the q button to quit
                if (scanFolder.get()):
                    final_txt = "Press \\'q\\'  to Quit"
                    cv2.putText(frame_, txt, (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.80, (0, 255, 0), 2)
                    cv2.putText(frame_, final_txt, (10, 65),
                                cv2.FONT_HERSHEY_SIMPLEX, .80, (0, 255, 0), 2)
                    cv2.imshow('Inference', frame_)
                    if cv2.waitKey(2000) & 0xFF == ord('q'): break
        #If Camera option is selected, starts the live camera feed
        elif (feed_option == "Camera"):
            cap = cv2.VideoCapture(0)
            while True:
                tm = time.time()
                ret, frame_ = cap.read()
                frame_ = preprocess_input(frame_)
                res = exec_net.infer(inputs={
                    input_blob:
                    [cv2.resize(frame_, (w, h)).transpose((2, 0, 1))]
                })
                res = res[out_blob]
                clslbl = []
                #Collect the top-10 predictions
                for i, probs in enumerate(res):
                    top_ind = np.argsort(np.squeeze(probs))[-10:][::-1]
                    for id in top_ind:
                        clslbl.append("{} ({:.2f}%)".format(
                            labels_map[id], 100 * probs[id]))
                frame_ = cv2.resize(frame_, (820, 460))
                txt = ('[%02d FPS] Prediction: ' %
                       (1 / (time.time() - tm))) + clslbl[0]
                final_txt = "Press \\'q\\'  to Quit"
                cv2.putText(frame_, txt, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            0.80, (0, 255, 0), 2)
                cv2.putText(frame_, final_txt, (10, 65),
                            cv2.FONT_HERSHEY_SIMPLEX, .80, (0, 255, 0), 2)
                #Show the live camera feed with the inference results, confidence details and FPS in a separate window
                #Press the q button to quit
                cv2.imshow('Inference', frame_)
                if cv2.waitKey(1) & 0xFF == ord('q'): break
        else:
            #If Video is selected as the capture option, load the selected video and start the inference process
            cap = cv2.VideoCapture(image_list[0])
            while (cap.isOpened()):
                tm = time.time()
                ret, frame_ = cap.read()
                frame_ = preprocess_input(frame_)
                res = exec_net.infer(inputs={
                    input_blob:
                    [cv2.resize(frame_, (w, h)).transpose((2, 0, 1))]
                })
                res = res[out_blob]
                clslbl = []
                for i, probs in enumerate(res):
                    top_ind = np.argsort(np.squeeze(probs))[-10:][::-1]
                    for id in top_ind:
                        clslbl.append("{} ({:.2f}%)".format(
                            labels_map[id], 100 * probs[id]))
                frame_ = cv2.resize(frame_, (820, 460))
                txt = ('[%02d FPS] Prediction: ' %
                       (1 / (time.time() - tm))) + clslbl[0]
                final_txt = "Press \\'q\\'  to Quit"
                cv2.putText(frame_, txt, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            0.80, (0, 255, 0), 2)
                cv2.putText(frame_, final_txt, (10, 65),
                            cv2.FONT_HERSHEY_SIMPLEX, .80, (0, 255, 0), 2)
                #Show the video in a separate window with the inference results, confidence details and FPS
                #Press the q button to quit
                cv2.imshow('Inference', frame_)
                if cv2.waitKey(1) & 0xFF == ord('q'): break

        #cap.release()
        cv2.destroyAllWindows()
        del exec_net, plugin, net
    except BaseException as e:
        msg = messagebox.showinfo("Inference",
                                  "An exception is encountered. " + str(e))
Exemplo n.º 24
def main():
    words = ['a', 'b', 'c', 'd', 'e', 'f', 
             'g', 'h', 'i', 'j', 'k', 'l', 
             'm', 'n', 'o', 'p', 'q', 'r', 
             's', 't', 'u', 'v', 'w', 'x', 
             'y', 'z', '0', '1', '2', '3', 
             '4', '5', '6', '7', '8', '9', 
             '-']    

    args = parsing().parse_args()

    model_graph = args.model
    model_weight = args.model[:-3] + 'bin'

    net = IENetwork(model = model_graph, 
                    weights = model_weight)

    # net.batch_size = 2

    iter_inputs = iter(net.inputs)
    iter_outputs = iter(net.outputs)
   
    # inputs_num = len(net.inputs)
    # print (inputs_num)

    '''
    input_blob = []
    for _inputs in iter_inputs:
        input_blob.append(_inputs)

    output_blob = []
    for _outputs in iter_outputs:
        output_blob.append(_outputs)
    '''

    '''
    input_l = []
    for i in input_blob:
        print (net.inputs[i].shape)
        input_l.append(np.ones(shape=net.inputs[i].shape, dtype=np.float32))

    inputs = dict()
    for i in range (inputs_num):
        inputs[input_blob[i]] = input_l[i]
    '''

    input_blob = next(iter_inputs)
    output_blob = next(iter_outputs)

    if args.input == '':
        input = np.ones(shape=net.inputs[input_blob].shape, dtype = np.float32)
    else:
        b, c, h, w = net.inputs[input_blob].shape
        print(b, c, h, w)
        import cv2
        input = cv2.imread(args.input)
        print(input.shape)
        input = cv2.resize(input, (w, h))
        input = input.transpose((2, 0, 1)).reshape(1, c, h, w)

    plugin = IEPlugin(device='CPU')
    exec_net = plugin.load(network=net)
    # if args.cpu_extension :
    #    plugin.add_cpu_extension(args.cpu_extension)
    # res = plugin.impl.CTCGreedyDecoder();

    a = np.concatenate((input, input), 0)
    print(a.shape)

    inputs = {input_blob: input}
    out = exec_net.infer(inputs)

    # print(out[output_blob])

    print(decoder.CTCGreedyDecoder(out[output_blob], words, words[-1]))

    print(decoder.CTCBeamSearchDecoder(out[output_blob], words, words[-1], 50))
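
# decoder.CTCGreedyDecoder above is an external helper; a minimal sketch of
# greedy CTC decoding (argmax per timestep, collapse repeats, drop the blank):
def ctc_greedy_decode_sketch(logits, alphabet, blank):
    import numpy as np
    ids = np.argmax(logits, axis=-1).reshape(-1)  # best class per timestep
    out, prev = [], None
    for i in ids:
        if i != prev and alphabet[i] != blank:
            out.append(alphabet[i])
        prev = i
    return ''.join(out)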


Exemplo n.º 25
def main():
    # Plugin initialization for specified device and load extensions library
    global rolling_log
    #defaultTarget = TARGET_DEVICE

    env_parser()
    args_parser()
    check_args()
    parse_conf_file()

    # if TARGET_DEVICE not in acceptedDevices:
    #     print ("Unsupporterd device " + TARGET_DEVICE + ". Defaulting to CPU")
    #     TARGET_DEVICE = 'CPU'

    print("Initializing plugin for {} device...".format(TARGET_DEVICE))
    plugin = IEPlugin(device=TARGET_DEVICE)
    if CPU_EXTENSION and TARGET_DEVICE == 'CPU':
        plugin.add_cpu_extension(CPU_EXTENSION)

    # Read IR
    print("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    # Load the IR
    print("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    del net

    minFPS = min([i.cap.get(cv2.CAP_PROP_FPS) for i in videoCaps])
    minlength = min([i.cap.get(cv2.CAP_PROP_FRAME_COUNT) for i in videoCaps])
    for vc in videoCaps:
        vc.rate = int(math.ceil(vc.length / minlength))
    print(minFPS)
    waitTime = int(
        round(1000 / minFPS /
              len(videoCaps)))  # wait time in ms between showing frames
    frames_sum = 0
    for vc in videoCaps:
        vc.init_vw(h, w, minFPS)
        frames_sum += vc.length
    statsWidth = w if w > 345 else 345
    statsHeight = h if h > (len(videoCaps) * 20 + 15) else (
        len(videoCaps) * 20 + 15)
    statsVideo = cv2.VideoWriter(os.path.join(output_dir,
                                              'Statistics.mp4'), 0x00000021,
                                 minFPS, (statsWidth, statsHeight), True)
    if not statsVideo.isOpened():
        print("Couldn't open stats video for writing")
        sys.exit(4)

    # Read the labels file
    if labels_file:
        with open(labels_file, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    # Init a rolling log to store events
    rolling_log_size = int((h - 15) / 20)
    rolling_log = collections.deque(maxlen=rolling_log_size)

    # Init inference request IDs
    cur_request_id = 0
    next_request_id = 1
    # Start with async mode enabled
    is_async_mode = True

    if not UI_OUTPUT:
        # Arrange windows so they are not overlapping
        #arrange_windows(w, h)
        print("To stop the execution press Esc button")

    no_more_data = False

    for vc in videoCaps:
        vc.start_time = datetime.datetime.now()
    frame_count = 0
    job_id = os.environ['PBS_JOBID']
    progress_file_path = os.path.join(output_dir,
                                      'i_progress_' + job_id + '.txt')
    infer_start_time = time.time()

    #Start while loop
    while True:
        # If all video captures are closed stop the loop
        if False not in [videoCap.closed for videoCap in videoCaps]:
            print("I broke here line 387")
            break

        no_more_data = False

        # loop over all video captures
        for idx, videoCapInfer in enumerate(videoCaps):
            # read the next frame
            #print("Video {0} has length {1} and fps {2}".format(idx, videoCapInfer.length,  videoCapInfer.fps))
            if not videoCapInfer.closed:
                #print("ID {0}".format(idx))
                vfps = int(round(videoCapInfer.cap.get(cv2.CAP_PROP_FPS)))
                #for i in range(0, int(round(vfps / minFPS))):
                for i in range(videoCapInfer.rate):
                    frame_count += 1
                    #print("i = {0}".format(i))
                    ret, frame = videoCapInfer.cap.read()
                    videoCapInfer.cur_frame_count += 1
                    # If the read failed close the program
                    if not ret:
                        videoCapInfer.closed = True
                        no_more_data = True
                        break

                if videoCapInfer.closed:
                    print("Video {0} is done".format(idx))
                    print("Video has  {0} frames ".format(
                        videoCapInfer.length))
                    break

                # Copy the current frame for later use
                videoCapInfer.cur_frame = frame.copy()
                videoCapInfer.initial_w = videoCapInfer.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
                videoCapInfer.initial_h = videoCapInfer.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
                # Resize and change the data layout so it is compatible
                in_frame = cv2.resize(videoCapInfer.cur_frame, (w, h))
                in_frame = in_frame.transpose(
                    (2, 0, 1))  # Change data layout from HWC to CHW
                in_frame = in_frame.reshape((n, c, h, w))

                infer_start = datetime.datetime.now()
                if is_async_mode:
                    exec_net.start_async(request_id=next_request_id,
                                         inputs={input_blob: in_frame})
                    # Async enabled and only one video capture
                    if (len(videoCaps) == 1):
                        videoCapResult = videoCapInfer
                    # Async enabled and more than one video capture
                    else:
                        # Get previous index
                        videoCapResult = videoCaps[
                            idx - 1 if idx - 1 >= 0 else len(videoCaps) - 1]
                else:
                    # Async disabled
                    exec_net.start_async(request_id=cur_request_id,
                                         inputs={input_blob: in_frame})
                    videoCapResult = videoCapInfer

                if exec_net.requests[cur_request_id].wait(-1) == 0:
                    infer_end = datetime.datetime.now()
                    infer_duration = infer_end - infer_start
                    current_count = 0
                    # Parse detection results of the current request
                    res = exec_net.requests[cur_request_id].outputs[out_blob]
                    for obj in res[0][0]:
                        class_id = int(obj[1])
                        # Draw only objects when probability more than specified threshold
                        if (obj[2] > PROB_THRESHOLD
                                and videoCapResult.req_label in labels_map
                                and labels_map.index(
                                    videoCapResult.req_label) == class_id - 1):
                            current_count += 1
                            xmin = int(obj[3] * videoCapResult.initial_w)
                            ymin = int(obj[4] * videoCapResult.initial_h)
                            xmax = int(obj[5] * videoCapResult.initial_w)
                            ymax = int(obj[6] * videoCapResult.initial_h)
                            # Draw box
                            cv2.rectangle(videoCapResult.cur_frame,
                                          (xmin, ymin), (xmax, ymax),
                                          (0, 255, 0), 4, 16)

                    if videoCapResult.candidate_count == current_count:
                        videoCapResult.candidate_confidence += 1
                    else:
                        videoCapResult.candidate_confidence = 0
                        videoCapResult.candidate_count = current_count

                    if videoCapResult.candidate_confidence == FRAME_THRESHOLD:
                        videoCapResult.candidate_confidence = 0
                        if current_count > videoCapResult.last_correct_count:
                            videoCapResult.total_count += current_count - videoCapResult.last_correct_count

                        if current_count != videoCapResult.last_correct_count:
                            if UI_OUTPUT:
                                currtime = datetime.datetime.now().strftime(
                                    "%H:%M:%S")
                                fr = FrameInfo(videoCapResult.frames,
                                               current_count, currtime)
                                videoCapResult.countAtFrame.append(fr)
                            new_objects = current_count - videoCapResult.last_correct_count
                            for _ in range(new_objects):
                                # Use a distinct name; `str` would shadow the built-in
                                log_entry = "{} - {} detected on {}".format(
                                    time.strftime("%H:%M:%S"),
                                    videoCapResult.req_label,
                                    videoCapResult.cap_name)
                                rolling_log.append(log_entry)

                        videoCapResult.frames += 1
                        videoCapResult.last_correct_count = current_count
                    else:
                        videoCapResult.frames += 1

                    videoCapResult.cur_frame = cv2.resize(
                        videoCapResult.cur_frame, (w, h))

                    if not UI_OUTPUT:
                        # Add log text to each frame
                        log_message = "Async mode is on." if is_async_mode else \
                                      "Async mode is off."
                        cv2.putText(videoCapResult.cur_frame, log_message,
                                    (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (255, 255, 255), 1)
                        log_message = "Total {} count: {}".format(
                            videoCapResult.req_label,
                            videoCapResult.total_count)
                        cv2.putText(videoCapResult.cur_frame, log_message,
                                    (10, h - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                    0.5, (255, 255, 255), 1)
                        log_message = "Current {} count: {}".format(
                            videoCapResult.req_label,
                            videoCapResult.last_correct_count)
                        cv2.putText(videoCapResult.cur_frame, log_message,
                                    (10, h - 30), cv2.FONT_HERSHEY_SIMPLEX,
                                    0.5, (255, 255, 255), 1)
                        cv2.putText(
                            videoCapResult.cur_frame, 'Infer wait: %0.3fs' %
                            (infer_duration.total_seconds()), (10, h - 70),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

                        # Display inferred frame and stats
                        stats = numpy.zeros((statsHeight, statsWidth, 1),
                                            dtype='uint8')
                        for i, log_line in enumerate(rolling_log):
                            cv2.putText(stats, log_line, (10, i * 20 + 15),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        (255, 255, 255), 1)
                        #cv2.imshow(STATS_WINDOW_NAME, stats)
                        if idx == 0:
                            stats = cv2.cvtColor(stats, cv2.COLOR_GRAY2BGR)
                            #Write
                            statsVideo.write(stats)
                        end_time = datetime.datetime.now()
                        cv2.putText(
                            videoCapResult.cur_frame, 'FPS: %0.2f' %
                            (1 / (end_time -
                                  videoCapResult.start_time).total_seconds()),
                            (10, h - 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (255, 255, 255), 1)
                        #cv2.imshow(videoCapResult.cap_name, videoCapResult.cur_frame)
                        videoCapResult.start_time = datetime.datetime.now()
                        #Write
                        videoCapResult.video.write(videoCapResult.cur_frame)

            if frame_count % 10 == 0:
                progressUpdate(progress_file_path,
                               time.time() - infer_start_time, frame_count,
                               frames_sum)

            # Wait if necessary for the required time
            #key = cv2.waitKey(waitTime)
            key = cv2.waitKey(1)

            # Esc key pressed
            if key == 27:
                cv2.destroyAllWindows()
                del exec_net
                del plugin
                print("Finished")
                return
            # Tab key pressed
            if key == 9:
                is_async_mode = not is_async_mode
                print("Switched to {} mode".format(
                    "async" if is_async_mode else "sync"))

            if is_async_mode:
                # Swap infer request IDs
                cur_request_id, next_request_id = next_request_id, cur_request_id

            # Loop video if LOOP_VIDEO = True and input isn't live from USB camera
            if LOOP_VIDEO and not videoCapInfer.is_cam:
                vfps = int(round(videoCapInfer.cap.get(cv2.CAP_PROP_FPS)))
                # If a video capture has ended restart it
                if (videoCapInfer.cur_frame_count >
                        videoCapInfer.cap.get(cv2.CAP_PROP_FRAME_COUNT) -
                        int(round(vfps / minFPS))):
                    videoCapInfer.cur_frame_count = 0
                    videoCapInfer.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

        if no_more_data:
            progressUpdate(progress_file_path,
                           time.time() - infer_start_time, frames_sum,
                           frames_sum)
            break


# End of while loop --------------------
    no_more_data = True
    t2 = time.time() - infer_start_time
    for vc in videoCaps:
        print(vc.length)
        print(vc.closed)
    print("End loop")
    print("Total time {0}".format(t2))
    print("Total frame count {0}".format(frame_count))
    print("fps {0}".format(frame_count / t2))
    with open(os.path.join(output_dir, 'stats.txt'), 'w') as f:
        f.write('{} \n'.format(round(t2)))
        f.write('{} \n'.format(frame_count))

    for vc in videoCaps:
        print("Frames processed {}".format(vc.cur_frame_count))
        print("Frames count {}".format(vc.length))

    for vc in videoCaps:
        vc.video.release()
        vc.cap.release()
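
The candidate/confidence bookkeeping above is an N-consecutive-frame debounce: a new per-frame count is trusted only once it has been seen FRAME_THRESHOLD frames in a row. Distilled into a standalone form it looks roughly like the sketch below; the class name and the default threshold are illustrative, not taken from the sample.

# Illustrative distillation of the per-capture count debouncing above.
class CountDebouncer:
    """Accept a new count only after it is seen frame_threshold frames in a row."""

    def __init__(self, frame_threshold=5):
        self.frame_threshold = frame_threshold
        self.candidate_count = 0
        self.candidate_confidence = 0
        self.stable_count = 0

    def update(self, current_count):
        # Consecutive identical counts build confidence; any change resets it.
        if self.candidate_count == current_count:
            self.candidate_confidence += 1
        else:
            self.candidate_confidence = 0
            self.candidate_count = current_count
        if self.candidate_confidence >= self.frame_threshold:
            self.candidate_confidence = 0
            self.stable_count = current_count
        return self.stable_count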
Example No. 26
        default="CPU",
        type=str)

    return parser


log.basicConfig(format="[ %(levelname)s ] %(message)s",
                level=log.INFO,
                stream=sys.stdout)
args = build_argparser().parse_args()
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"

# Plugin initialization for specified device and load extensions library if specified
log.info("Initializing plugin for {} device...".format(args.device))
plugin = IEPlugin(device=args.device, plugin_dirs=None)

# Read IR
log.info("Reading IR...")
net = IENetwork(model=model_xml, weights=model_bin)

if plugin.device == "CPU":
    supported_layers = plugin.get_supported_layers(net)
    not_supported_layers = [
        l for l in net.layers.keys() if l not in supported_layers
    ]
    if len(not_supported_layers) != 0:
        log.error(
            "Following layers are not supported by the plugin for specified device {}:\n {}"
            .format(plugin.device, ', '.join(not_supported_layers)))
        log.error(
            "Please try to specify cpu extensions library path in sample's command line parameters using -l "
            "or --cpu_extension command line argument")
        sys.exit(1)
Example No. 27
    video_capture = cv2.VideoCapture(0)

# Prepare labels map
with open(args.labels) as f:
    labels_map = json.load(f)

# switch keys and values so class indices map to label names
labels_map = {value: key for key, value in labels_map.items()}

# setup logger
logging.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=logging.INFO,
                    stream=sys.stdout)

# Load model into inference engine
plugin = IEPlugin(args.device)
net = IENetwork(model=args.model, weights=args.weights)
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
exec_net = plugin.load(network=net, num_requests=2)

# Input shape: [n_samples, n_channels, height, width]
input_shape = net.inputs[input_blob].shape
input_height = input_shape[2]
input_width = input_shape[3]
assert input_shape[0] == 1


def preprocess(frame):
    frame = cv2.resize(frame, (input_width, input_height))
    frame = frame.astype(np.float32)
    # The tail of this snippet is truncated in the source; the usual
    # HWC -> CHW transpose and batch reshape are reconstructed here.
    frame = frame.transpose((2, 0, 1))
    return frame.reshape(input_shape)
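
A minimal sketch of how preprocess() would typically be driven, assuming the video_capture, exec_net, input_blob, and out_blob objects prepared above and an SSD-style detection output; the loop is illustrative and not part of the original snippet.

# Hypothetical driver loop for preprocess(), assuming the setup above.
while True:
    ret, frame = video_capture.read()
    if not ret:
        break
    in_frame = preprocess(frame)
    res = exec_net.infer(inputs={input_blob: in_frame})
    # SSD layout: [image_id, class_id, conf, xmin, ymin, xmax, ymax]
    for obj in res[out_blob][0][0]:
        if obj[2] > 0.5:
            print("class {} at {:.0%} confidence".format(int(obj[1]), obj[2]))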
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for %s device...", args.device)
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in plugin.device:
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if not_supported_layers:
            log.error(
                "Following layers are not supported by the plugin for specified device %s:\n %s",
                plugin.device, ', '.join(not_supported_layers))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)

    # Read and pre-process input image
    batch, channels, height, width = net.inputs[input_blob].shape
    del net

    predictions = []
    data = Input(args.input_type, args.input)
    cur_request_id = 0

    fps = 25
    out_width = 640
    out_height = 480
    if args.dump_output_video:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(args.path_to_output_video, fourcc, fps,
                              (int(out_width), int(out_height)))

    while not data.is_finished():
        frame, img_id = data.get_next_item()
        initial_h, initial_w, _ = frame.shape
        in_frame = cv2.resize(frame, (width, height))
        in_frame = in_frame.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((batch, channels, height, width))

        exec_net.start_async(request_id=cur_request_id,
                             inputs={input_blob: in_frame})
        if exec_net.requests[cur_request_id].wait(-1) == 0:

            # Parse detection results of the current request
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            coco_detections = []
            for obj in res[0][0]:
                # Draw only objects when probability more than specified threshold
                if obj[2] > args.prob_threshold:
                    top_left_x = float(obj[3] * initial_w)
                    top_left_y = float(obj[4] * initial_h)
                    bottom_right_x = float(obj[5] * initial_w)
                    bottom_right_y = float(obj[6] * initial_h)

                    obj_width = round(bottom_right_x - top_left_x, 1)
                    obj_height = round(bottom_right_y - top_left_y, 1)
                    class_id = int(obj[1])

                    coco_det = {}
                    coco_det['image_id'] = img_id
                    coco_det['category_id'] = class_id
                    coco_det['bbox'] = [
                        round(top_left_x, 1),
                        round(top_left_y, 1), obj_width, obj_height
                    ]
                    coco_det['score'] = round(float(obj[2]), 1)
                    coco_detections.append(coco_det)

                    # Draw box and label/class_id
                    cv2.rectangle(frame, (int(top_left_x), int(top_left_y)),
                                  (int(bottom_right_x), int(bottom_right_y)),
                                  (255, 0, 0), 2)
                    cv2.putText(
                        frame,
                        str(class_id) + ' ' + str(round(obj[2] * 100, 1)) +
                        ' %', (int(top_left_x), int(top_left_y) - 7),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            predictions.extend(coco_detections)

        if args.dump_output_video:
            img_resized = cv2.resize(frame, (out_width, out_height))
            out.write(img_resized)

        if args.show:
            cv2.imshow("Detection Results", frame)
            key = cv2.waitKey(args.delay)
            if key == 27:
                break

    if args.dump_predictions_to_json:
        with open(args.output_json_path, 'w') as output_file:
            json.dump(predictions, output_file, sort_keys=True, indent=4)

    cv2.destroyAllWindows()
    del exec_net
    del plugin
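
The per-object parsing loop above recurs in nearly every example on this page; factoring it into a helper keeps the bounding-box math in one place. The function below is an illustrative sketch (the name parse_ssd_detections is not from any of the samples), assuming the standard SSD output blob of shape [1, 1, N, 7].

# Illustrative helper: convert a raw SSD output blob to pixel-space boxes.
def parse_ssd_detections(res_blob, frame_w, frame_h, threshold=0.5):
    detections = []
    for obj in res_blob[0][0]:
        if obj[2] > threshold:
            detections.append({
                'class_id': int(obj[1]),
                'score': float(obj[2]),
                'bbox': [int(obj[3] * frame_w), int(obj[4] * frame_h),
                         int(obj[5] * frame_w), int(obj[6] * frame_h)],
            })
    return detections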
Example No. 29
                help="Path to the input configuration file")
ap.add_argument("-i", "--input", help="path to the input video file")
args = vars(ap.parse_args())

# load the configuration file
conf = Conf(args["conf"])

# load the COCO class labels our YOLO model was trained on and
# initialize a list of colors to represent each possible class
# label
LABELS = open(conf["labels_path"]).read().strip().split("\n")
np.random.seed(42)
COLORS = np.random.uniform(0, 255, size=(len(LABELS), 3))

# initialize the plugin in for specified device
plugin = IEPlugin(device="MYRIAD")

# read the IR generated by the Model Optimizer (.xml and .bin files)
print("[INFO] loading models...")
net = IENetwork(model=conf["xml_path"], weights=conf["bin_path"])

# prepare inputs
print("[INFO] preparing inputs...")
inputBlob = next(iter(net.inputs))

# set the default batch size as 1 and get the number of input blobs,
# number of channels, the height, and width of the input blob
net.batch_size = 1
(n, c, h, w) = net.inputs[inputBlob].shape

# if a video path was not supplied, grab a reference to the webcam
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in demo's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    log.info("Input shape: n={} c={} h={} w={}".format(n, c, h, w))
    del net
    if args.input == 'cam':
        input_stream = 0
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cap = cv2.VideoCapture(input_stream)
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    videoWriter = cv2.VideoWriter("label.avi",
                                  cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                  fps, (width, height))

    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the demo execution press Esc button")
    is_async_mode = True
    render_time = 0
    ret, frame = cap.read()
    max_cost_time = 0
    total_cost_time = 0
    cnt = 0
    while cap.isOpened():
        if is_async_mode:
            ret, next_frame = cap.read()
        else:
            ret, frame = cap.read()
        if not ret:
            break
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        # Main sync point:
        # in true async mode we start the NEXT infer request while waiting for the CURRENT one to complete
        # in regular (sync) mode we start the CURRENT request and immediately wait for its completion
        inf_start = time.time()
        if is_async_mode:
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=next_request_id,
                                 inputs={input_blob: in_frame})
        else:
            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=cur_request_id,
                                 inputs={input_blob: in_frame})
        if exec_net.requests[cur_request_id].wait(-1) == 0:
            inf_end = time.time()
            det_time = inf_end - inf_start
            max_cost_time = det_time if det_time > max_cost_time else max_cost_time
            total_cost_time += det_time
            cnt += 1

            # Parse detection results of the current request
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            for obj in res[0][0]:
                # Draw only objects when probability more than specified threshold
                if obj[2] > args.prob_threshold:
                    xmin = int(obj[3] * initial_w)
                    ymin = int(obj[4] * initial_h)
                    xmax = int(obj[5] * initial_w)
                    ymax = int(obj[6] * initial_h)
                    class_id = int(obj[1])
                    # Draw box and label/class_id
                    color = (min(class_id * 12.5, 255),
                             min(class_id * 7, 255),
                             min(class_id * 5, 255))
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
                    det_label = labels_map[class_id] if labels_map else str(
                        class_id)
                    cv2.putText(
                        frame,
                        det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %',
                        (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color,
                        1)

            # Draw performance stats
            inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
                "Inference time: {:.3f} ms".format(det_time * 1000)
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                render_time * 1000)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                "Async mode is off. Processing request {}".format(cur_request_id)

            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message, (10, int(initial_h - 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

        # Render the annotated frame and record how long drawing takes
        render_start = time.time()
        cv2.imshow("Detection Results", frame)
        videoWriter.write(frame)
        render_end = time.time()
        render_time = render_end - render_start

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame

        key = cv2.waitKey(1)
        if key == 27:
            break
        if key == 9:  # Tab key pressed
            is_async_mode = not is_async_mode
            log.info("Switched to {} mode".format(
                "async" if is_async_mode else "sync"))

    print("Inference Max Cost {:.3f}ms".format(max_cost_time * 1000))
    print("Total Cost {:.3f}ms, mean cost {:.3f}ms, frame cnt {}".format(
        total_cost_time * 1000, total_cost_time / cnt * 1000, cnt))
    cv2.destroyAllWindows()
    del exec_net
    del plugin
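
Stripped of the drawing and timing code, the two-request ping-pong this demo relies on reduces to a few lines. The sketch below is illustrative: prepare() stands in for the resize/transpose/reshape steps shown above, and the remaining names are assumed from the example.

# Minimal sketch of the async double-buffering pattern used above.
cur_id, next_id = 0, 1
ret, frame = cap.read()
while ret:
    ret, next_frame = cap.read()
    if ret:
        # Queue inference for the NEXT frame before consuming the current one
        exec_net.start_async(request_id=next_id,
                             inputs={input_blob: prepare(next_frame)})
    if exec_net.requests[cur_id].wait(-1) == 0:
        res = exec_net.requests[cur_id].outputs[out_blob]
        # ... consume detections for `frame` here ...
    cur_id, next_id = next_id, cur_id
    frame = next_frame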
Example No. 31
def greengrass_object_detection_sample_ssd_run():
    client.publish(topic=PARAM_TOPIC_NAME, payload="OpenVINO: Initializing...")
    model_bin = os.path.splitext(PARAM_MODEL_XML)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=PARAM_DEVICE, plugin_dirs="")
    if "CPU" in PARAM_DEVICE:
        plugin.add_cpu_extension(PARAM_CPU_EXTENSION_PATH)
    # Read IR
    net = IENetwork.from_ir(model=PARAM_MODEL_XML, weights=model_bin)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    cap = cv2.VideoCapture(PARAM_INPUT_SOURCE)
    exec_net = plugin.load(network=net)
    del net
    client.publish(topic=PARAM_TOPIC_NAME,
                   payload="Starting inference on %s" % PARAM_INPUT_SOURCE)
    start_time = timeit.default_timer()
    inf_seconds = 0.0
    frame_count = 0
    labeldata = None
    if PARAM_LABELMAP_FILE is not None:
        with open(PARAM_LABELMAP_FILE) as labelmap_file:
            labeldata = json.load(labelmap_file)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frameid = cap.get(cv2.CAP_PROP_POS_FRAMES)
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        in_frame = cv2.resize(frame, (w, h))
        in_frame = in_frame.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))
        # Start synchronous inference
        inf_start_time = timeit.default_timer()
        res = exec_net.infer(inputs={input_blob: in_frame})
        inf_seconds += timeit.default_timer() - inf_start_time
        # Parse detection results of the current request
        res_json = OrderedDict()
        frame_timestamp = datetime.datetime.now()
        object_id = 0
        for obj in res[out_blob][0][0]:
            if obj[2] > 0.5:
                xmin = int(obj[3] * initial_w)
                ymin = int(obj[4] * initial_h)
                xmax = int(obj[5] * initial_w)
                ymax = int(obj[6] * initial_h)
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                              (255, 165, 20), 4)
                obj_id = "Object" + str(object_id)
                classlabel = labeldata[str(int(obj[1]))] if labeldata else ""
                res_json[obj_id] = {
                    "label": int(obj[1]),
                    "class": classlabel,
                    "confidence": round(obj[2], 2),
                    "xmin": round(obj[3], 2),
                    "ymin": round(obj[4], 2),
                    "xmax": round(obj[5], 2),
                    "ymax": round(obj[6], 2)
                }
                object_id += 1
        frame_count += 1
        # Measure elapsed seconds since the last report
        seconds_elapsed = timeit.default_timer() - start_time
        if seconds_elapsed >= reporting_interval:
            res_json["timestamp"] = frame_timestamp.isoformat()
            res_json["frame_id"] = int(frameid)
            res_json["inference_fps"] = frame_count / inf_seconds
            start_time = timeit.default_timer()
            report(res_json, frame)
            frame_count = 0
            inf_seconds = 0.0

    client.publish(topic=PARAM_TOPIC_NAME,
                   payload="End of the input, exiting...")
    del exec_net
    del plugin
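
The interval-based reporting at the end of the loop (publish once every reporting_interval seconds, then reset the counters) is reusable on its own. A hedged sketch, with an illustrative class name not taken from the sample:

import timeit

# Illustrative distillation of the interval-based reporting above.
class IntervalReporter:
    def __init__(self, interval_s):
        self.interval_s = interval_s
        self.last_report = timeit.default_timer()

    def maybe_report(self, payload, publish):
        # Publish only if the interval has elapsed since the last report.
        now = timeit.default_timer()
        if now - self.last_report >= self.interval_s:
            publish(payload)
            self.last_report = now
            return True
        return False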
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)

    if "CPU" in plugin.device:
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = 1  # One frame per inference

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))

    log.info("Batch size is {}".format(n))

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)
    del net

    cap = cv2.VideoCapture(0)
    cap.set(3, 320)
    cap.set(4, 240)
    ret, image = cap.read()

    # Start sync inference
    log.info("Starting inference ({} iterations)".format(args.number_iter))

    step = 0
    while ret:
        step += 1
        ret, image = cap.read()
        if not ret:
            break
        cv2.imshow('Raw', cv2.resize(image, (455, 256)))
        if image.shape[:-1] != (h, w):
            image = cv2.resize(image, (w, h))
        image = image.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW

        images[0] = image
        infer_time = []
        for i in range(args.number_iter):
            t0 = time()
            res = exec_net.infer(inputs={input_blob: images})
            infer_time.append((time() - t0) * 1000)
        log.info("Average running time of one iteration: {} ms".format(
            np.average(np.asarray(infer_time))))
        if args.perf_counts:
            perf_counts = exec_net.requests[0].get_perf_counts()
            log.info("Performance counters:")
            print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
                'name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
            for layer, stats in perf_counts.items():
                print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
                    layer, stats['layer_type'], stats['exec_type'],
                    stats['status'], stats['real_time']))
        # Processing output blob
        log.info("Processing output blob")
        res = res[out_blob]
        # Post process output
        for batch, data in enumerate(res):
            # CHW -> HWC so OpenCV can handle it, then swap color channels
            data = np.swapaxes(data, 0, 2)
            data = np.swapaxes(data, 0, 1)
            data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
            # Clip values to the [0, 255] range, subtract the channel means,
            # and rescale to [0, 1] for display
            data[data < 0] = 0
            data[data > 255] = 255
            data = data - (args.mean_val_r, args.mean_val_g,
                           args.mean_val_b)
            data /= 255
            cv2.imshow('Video', cv2.resize(data, (455, 256)))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    del exec_net
    del plugin
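
The output post-processing above (CHW to HWC, channel swap, clamping) is easy to get wrong; factored into a helper it might look like the illustrative sketch below, which assumes a float CHW blob roughly in the [0, 255] range and uses a hypothetical function name.

import numpy as np

# Illustrative helper: turn one CHW float output blob into a uint8 image.
def chw_to_displayable(data):
    img = np.transpose(data, (1, 2, 0))  # CHW -> HWC for OpenCV
    img = np.clip(img, 0, 255)           # clamp to the displayable range
    return img.astype(np.uint8)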
def greengrass_object_detection_sample_ssd_run():
    client.publish(topic=PARAM_TOPIC_NAME, payload="OpenVINO: Initializing...")
    model_bin = os.path.splitext(PARAM_MODEL_XML)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=PARAM_DEVICE, plugin_dirs="")
    if "CPU" in PARAM_DEVICE:
        plugin.add_cpu_extension(PARAM_CPU_EXTENSION_PATH)

    # Read IR
    net = IENetwork(model=PARAM_MODEL_XML, weights=model_bin)
    client.publish(topic=PARAM_TOPIC_NAME, payload="OpenVINO: Read IR...")
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    cap = cv2.VideoCapture(PARAM_INPUT_SOURCE)
    exec_net = plugin.load(network=net)
    del net
    client.publish(topic=PARAM_TOPIC_NAME, payload="Starting inference on %s" % PARAM_INPUT_SOURCE)
    start_time = timeit.default_timer()
    inf_seconds = 0.0
    frame_count = 0
    labeldata = None
    if PARAM_LABELMAP_FILE is not None:
        with open(PARAM_LABELMAP_FILE) as labelmap_file:
            labeldata = json.load(labelmap_file)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frameid = cap.get(cv2.CAP_PROP_POS_FRAMES)
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        in_frame = cv2.resize(frame, (w, h))
        in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))
        # Start synchronous inference
        inf_start_time = timeit.default_timer()
        res = exec_net.infer(inputs={input_blob: in_frame})
        inf_seconds += timeit.default_timer() - inf_start_time
        # Parse detection results of the current request
        res_json = OrderedDict()
        frame_timestamp = datetime.datetime.now()
        object_id = 0
        for obj in res[out_blob][0][0]:
            if obj[2] > 0.5:
                xmin = int(obj[3] * initial_w)
                ymin = int(obj[4] * initial_h)
                xmax = int(obj[5] * initial_w)
                ymax = int(obj[6] * initial_h)
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 4)
                obj_id = "Object" + str(object_id)
                classlabel = labeldata[str(int(obj[1]))] if labeldata else ""
                res_json[obj_id] = {"label": int(obj[1]), "class": classlabel, "confidence": round(obj[2], 2), "xmin": round(
                    obj[3], 2), "ymin": round(obj[4], 2), "xmax": round(obj[5], 2), "ymax": round(obj[6], 2)}
                object_id += 1
        frame_count += 1
        # Measure elapsed seconds since the last report
        seconds_elapsed = timeit.default_timer() - start_time
        if seconds_elapsed >= reporting_interval:
            res_json["timestamp"] = frame_timestamp.isoformat()
            res_json["frame_id"] = int(frameid)
            res_json["inference_fps"] = frame_count / inf_seconds
            start_time = timeit.default_timer()
            report(res_json, frame)
            frame_count = 0
            inf_seconds = 0.0

    client.publish(topic=PARAM_TOPIC_NAME, payload="End of the input, exiting...")
    del exec_net
    del plugin
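
All of these examples use the IEPlugin/IENetwork pair, which later OpenVINO releases deprecated in favor of IECore. For reference, a minimal sketch of the equivalent setup, assuming an OpenVINO release around 2020 where IECore is available (input_info replaces inputs in newer versions):

from openvino.inference_engine import IECore

# Sketch of the IECore equivalent of the IEPlugin setup used above.
ie = IECore()
net = ie.read_network(model=model_xml, weights=model_bin)
input_blob = next(iter(net.input_info))
out_blob = next(iter(net.outputs))
exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2)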