import cv2
import numpy as np
from openvino.inference_engine import IENetwork, IECore


class FaceDetection:
    '''
    Class for the Face Detection Model.
    '''
    def __init__(self, model_name, device='CPU', extensions=None):
        self.model_weights = model_name + '.bin'
        self.model_structure = model_name + '.xml'
        self.device = device
        self.extensions = extensions
        try:
            self.model = IENetwork(self.model_structure, self.model_weights)
        except Exception:
            raise ValueError(
                "Could not initialise the network. Have you entered the correct model path?"
            )

        self.input_name = next(iter(self.model.inputs))
        self.input_shape = self.model.inputs[self.input_name].shape
        self.output_name = next(iter(self.model.outputs))
        self.output_shape = self.model.outputs[self.output_name].shape

    def load_model(self):
        self.net = IECore().load_network(network=self.model,
                                         device_name=self.device,
                                         num_requests=1)

    def predict(self, image):
        self.preprocess_image = self.preprocess_input(np.copy(image))
        self.net.start_async(request_id=0,
                             inputs={self.input_name: self.preprocess_image})
        if self.net.requests[0].wait(-1) == 0:
            outputs = self.net.requests[0].outputs
            return self.preprocess_output(outputs, image)
        return None  # inference did not complete

    def preprocess_input(self, image):
        image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
        image = image.transpose((2, 0, 1))
        image = image.reshape(1, *image.shape)
        return image

    def preprocess_output(self, outputs, image):
        # Detections are shaped [1, 1, N, 7]; each row is
        # [image_id, label, conf, xmin, ymin, xmax, ymax] in normalised coords.
        # Take the first detection and crop it from the original image.
        box = outputs[self.output_name][0][0][0]
        xmin = int(box[3] * image.shape[1])
        ymin = int(box[4] * image.shape[0])
        xmax = int(box[5] * image.shape[1])
        ymax = int(box[6] * image.shape[0])
        return image[ymin:ymax, xmin:xmax]
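
A minimal usage sketch for the class above (model path and input image are
hypothetical, not from the source):

    fd = FaceDetection('models/face-detection-adas-0001')  # hypothetical path
    fd.load_model()
    frame = cv2.imread('frame.jpg')   # hypothetical input image
    face_crop = fd.predict(frame)     # cropped region of the first detected face
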
class HeadPose:
    '''
    Class for the Head Pose Estimation Model.
    '''
    def __init__(self, model_name, device='CPU', extensions=None):
        self.model_weights = model_name + '.bin'
        self.model_structure = model_name + '.xml'
        self.device = device
        self.extensions = extensions
        try:
            self.model = IENetwork(self.model_structure, self.model_weights)
        except Exception:
            raise ValueError(
                "Could not initialise the network. Have you entered the correct model path?"
            )

        self.input_name = next(iter(self.model.inputs))
        self.input_shape = self.model.inputs[self.input_name].shape
        self.output_name = next(iter(self.model.outputs))
        self.output_shape = self.model.outputs[self.output_name].shape

    def load_model(self):
        self.net = IECore().load_network(network=self.model,
                                         device_name=self.device,
                                         num_requests=1)

    def predict(self, image):
        self.preprocess_image = self.preprocess_input(np.copy(image))
        self.net.start_async(request_id=0,
                             inputs={self.input_name: self.preprocess_image})
        if self.net.requests[0].wait(-1) == 0:
            outputs = self.net.requests[0].outputs
            yaw, pitch, roll = self.preprocess_output(outputs)
            return [[yaw, pitch, roll]]
        return None  # inference did not complete

    def preprocess_input(self, image):
        image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
        image = image.transpose((2, 0, 1))
        image = image.reshape(1, *image.shape)
        return image

    def preprocess_output(self, outputs):
        # The model exposes three scalar outputs: yaw, pitch and roll.
        return outputs['angle_y_fc'], outputs['angle_p_fc'], outputs['angle_r_fc']
class GazeEstimation:
    '''
    Class for the Gaze Estimation Model.
    '''
    def __init__(self, model_name, device='CPU', extensions=None):
        self.model_weights = model_name + '.bin'
        self.model_structure = model_name + '.xml'
        self.device = device
        self.extensions = extensions
        try:
            self.model = IENetwork(self.model_structure, self.model_weights)
        except Exception:
            raise ValueError(
                "Could not initialise the network. Have you entered the correct model path?"
            )

        self.input_name = list(self.model.inputs.keys())
        # Both eye inputs share the same shape; use the left eye as reference.
        self.input_shape = self.model.inputs['left_eye_image'].shape
        self.output_name = next(iter(self.model.outputs))
        self.output_shape = self.model.outputs[self.output_name].shape

    def load_model(self):
        self.net = IECore().load_network(network=self.model,
                                         device_name=self.device,
                                         num_requests=1)

    def predict(self, left_eye_image, right_eye_image, head_pose_angles):
        self.preprocess_left_eye_image = self.preprocess_input(np.copy(left_eye_image))
        self.preprocess_right_eye_image = self.preprocess_input(np.copy(right_eye_image))
        self.head_pose_angles = head_pose_angles
        self.net.start_async(request_id=0,
                             inputs={'head_pose_angles': self.head_pose_angles,
                                     'left_eye_image': self.preprocess_left_eye_image,
                                     'right_eye_image': self.preprocess_right_eye_image})
        if self.net.requests[0].wait(-1) == 0:
            outputs = self.net.requests[0].outputs
            return self.preprocess_output(outputs)
        return None  # inference did not complete

    def preprocess_input(self, image):
        image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
        image = image.transpose((2, 0, 1))
        image = image.reshape(1, *image.shape)
        return image

    def preprocess_output(self, outputs):
        # Single output: the gaze vector, squeezed to a flat array.
        return outputs[self.output_name].squeeze()
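
How the three classes chain together, as a hedged sketch: the face crop feeds
the head-pose model, and the gaze model consumes both eye crops plus the pose
angles. Eye crops would normally come from a facial-landmarks model that this
listing does not include, so they are assumed to exist here; all model paths
are hypothetical:

    face_det = FaceDetection('models/face-detection-adas-0001')
    head_pose = HeadPose('models/head-pose-estimation-adas-0001')
    gaze_est = GazeEstimation('models/gaze-estimation-adas-0002')
    for m in (face_det, head_pose, gaze_est):
        m.load_model()

    frame = cv2.imread('frame.jpg')
    face = face_det.predict(frame)      # cropped face region
    angles = head_pose.predict(face)    # [[yaw, pitch, roll]]
    # left_eye / right_eye crops assumed to be produced elsewhere
    gaze = gaze_est.predict(left_eye, right_eye, angles)  # gaze vector
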
Example #4

import logging as log
import os
import sys
import time

import cv2
from imutils import build_montages
from openvino.inference_engine import IECore

# build_argparser() (the argument-parsing helper) is assumed to be defined
# elsewhere in the demo.
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    #plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    #if args.cpu_extension and 'CPU' in args.device:
    #   plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Reading IR...")
    ie = IECore()
    net = ie.read_network(model=model_xml, weights=model_bin)

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(args.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in demo's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    # Added here: the model has a single input/output (asserted above), so
    # these aliases for the second stream resolve to the same blobs.
    input_blob2 = next(iter(net.inputs))
    out_blob2 = next(iter(net.outputs))

    log.info("Loading IR to the plugin...")
    exec_net = ie.load_network(network=net,
                               device_name=args.device,
                               num_requests=2)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    #Added here
    n2, c2, h2, w2 = net.inputs[input_blob2].shape
    del net
    #added from here
    if args.input == 'cam':
        input_stream = 0
    elif args.input == 'gstreamer':
        #gst rtp sink
        input_stream = 'udpsrc port=5000 caps = " application/x-rtp, encoding-name=JPEG,payload=26" ! rtpjpegdepay ! decodebin ! videoconvert ! appsink'
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    if input_stream == 'gstreamer':
        cap = cv2.VideoCapture(input_stream, cv2.CAP_GSTREAMER)
    else:
        cap = cv2.VideoCapture(input_stream)
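    # For reference, a matching RTP/JPEG sender for the 'gstreamer' input
    # above could look like this (an assumption, not part of the original
    # demo):
    #   gst-launch-1.0 v4l2src ! videoconvert ! jpegenc ! rtpjpegpay \
    #       ! udpsink host=<receiver-ip> port=5000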

    if args.input2 == 'cam':
        input_stream2 = 0
    elif args.input2 == 'gstreamer':
        #gst rtp sink
        input_stream2 = 'udpsrc port=5001 caps = " application/x-rtp, encoding-name=JPEG,payload=26" ! rtpjpegdepay ! decodebin ! videoconvert ! appsink'
    else:
        input_stream2 = args.input2
        assert os.path.isfile(
            args.input2), "Specified input file doesn't exist"

    if input_stream2 == 'gstreamer':
        cap2 = cv2.VideoCapture(input_stream2, cv2.CAP_GSTREAMER)
    else:
        cap2 = cv2.VideoCapture(input_stream2)

    cur_request_id = 0
    next_request_id = 1

    #Added here
    cur_request_id2 = 1
    next_request_id2 = 0

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the demo execution press Esc button")

    # Async doesn't work if True:
    # reusing busy request slots raises RuntimeError: [REQUEST BUSY] (see below)
    is_async_mode = False
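    # Why the busy requests occur (an observation about this code, not from
    # the source): in async mode both streams share the same two request
    # slots, and only the stream-1 ids are swapped at the end of the loop,
    # so from the second iteration onward the same slot gets started twice
    # per frame while still running. A sketch of a fix, under that
    # assumption: load the network with num_requests=4 and give each stream
    # its own pair of slots, e.g. cur/next = 0/1 for stream 1 and 2/3 for
    # stream 2, swapping both pairs each iteration.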
    render_time = 0
    ret, frame = cap.read()
    ret2, frame2 = cap2.read()

    #Montage width and height
    #In this case means 2x1 boxes
    mW = 2
    mH = 1

    frameList = []

    print(
        "To close the application, press 'CTRL+C' or any key with focus on the output window"
    )
    while cap.isOpened() or cap2.isOpened():
        if is_async_mode:
            ret, next_frame = cap.read()
            ret2, next_frame2 = cap2.read()
        else:
            ret, frame = cap.read()
            ret2, frame2 = cap2.read()
        if (not (ret and ret2)):
            break
        initial_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        initial_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        initial_w2 = cap2.get(cv2.CAP_PROP_FRAME_WIDTH)
        initial_h2 = cap2.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # Main sync point:
        # in true async mode we start the NEXT infer request while waiting for the CURRENT one to complete;
        # in regular mode we start the CURRENT request and immediately wait for its completion
        inf_start = time.time()
        if is_async_mode:
            if (ret and ret2):
                in_frame = cv2.resize(next_frame, (w, h))
                in_frame = in_frame.transpose(
                    (2, 0, 1))  # Change data layout from HWC to CHW
                in_frame = in_frame.reshape((n, c, h, w))
                exec_net.start_async(request_id=next_request_id,
                                     inputs={input_blob: in_frame})

                in_frame2 = cv2.resize(next_frame2, (w2, h2))
                in_frame2 = in_frame2.transpose(
                    (2, 0, 1))  # Change data layout from HWC to CHW
                in_frame2 = in_frame2.reshape((n2, c2, h2, w2))
                exec_net.start_async(request_id=next_request_id2,
                                     inputs={input_blob2: in_frame2})

        else:
            if (ret and ret2):
                in_frame = cv2.resize(frame, (w, h))
                in_frame = in_frame.transpose(
                    (2, 0, 1))  # Change data layout from HWC to CHW
                in_frame = in_frame.reshape((n, c, h, w))
                exec_net.start_async(request_id=cur_request_id,
                                     inputs={input_blob: in_frame})

                in_frame2 = cv2.resize(frame2, (w2, h2))
                in_frame2 = in_frame2.transpose(
                    (2, 0, 1))  # Change data layout from HWC to CHW
                in_frame2 = in_frame2.reshape((n2, c2, h2, w2))
                exec_net.start_async(request_id=cur_request_id2,
                                     inputs={input_blob2: in_frame2})

        if exec_net.requests[cur_request_id].wait(
                -1) == 0 and exec_net.requests[cur_request_id2].wait(-1) == 0:
            inf_end = time.time()
            det_time = inf_end - inf_start

            # Parse detection results of the current request
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            res2 = exec_net.requests[cur_request_id2].outputs[out_blob2]

            for obj in res[0][0]:
                # Draw only objects whose probability exceeds the specified threshold
                if obj[2] > args.prob_threshold:
                    xmin = int(obj[3] * initial_w)
                    ymin = int(obj[4] * initial_h)
                    xmax = int(obj[5] * initial_w)
                    ymax = int(obj[6] * initial_h)
                    class_id = int(obj[1])
                    # Draw box and label/class_id
                    color = (min(class_id * 12.5,
                                 255), min(class_id * 7,
                                           255), min(class_id * 5, 255))
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
                    det_label = labels_map[class_id] if labels_map else str(
                        class_id)
                    cv2.putText(
                        frame,
                        det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %',
                        (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color,
                        1)
            for obj in res2[0][0]:
                # Draw only objects whose probability exceeds the specified threshold
                if obj[2] > args.prob_threshold:
                    xmin = int(obj[3] * initial_w2)
                    ymin = int(obj[4] * initial_h2)
                    xmax = int(obj[5] * initial_w2)
                    ymax = int(obj[6] * initial_h2)
                    class_id = int(obj[1])
                    # Draw box and label/class_id
                    color = (min(class_id * 12.5,
                                 255), min(class_id * 7,
                                           255), min(class_id * 5, 255))
                    cv2.rectangle(frame2, (xmin, ymin), (xmax, ymax), color, 2)
                    det_label = labels_map[class_id] if labels_map else str(
                        class_id)
                    cv2.putText(
                        frame2,
                        det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %',
                        (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color,
                        1)

            # Draw performance stats
            inf_time_message = "Inference time: N/A for async mode" if is_async_mode else \
                "Inference time: {:.3f} ms".format(det_time * 1000)
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                render_time * 1000)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                "Async mode is off. Processing request {}".format(cur_request_id)

            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message, (10, int(initial_h - 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

            cv2.putText(frame2, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame2, render_time_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame2, async_mode_message, (10, int(initial_h2 - 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

        render_start = time.time()

        # Added here
        # Rebuild the frame list each pass so the montage always shows the two
        # latest frames (appending without clearing would grow the list, and
        # the memory use, without bound)

        if (ret and ret2):
            frameList = [frame, frame2]

        # build_montages function from imutils displays the 2 cameras on a single dashboard

        montages = build_montages(frameList, (640, 480), (mW, mH))

        for montage in montages:
            cv2.imshow("Detection results", montage)

        #cv2.imshow("Detection Results", frame)
        render_end = time.time()
        render_time = render_end - render_start

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id

            frame = next_frame
            frame2 = next_frame2
        key = cv2.waitKey(1)
        if key == 27:
            break
        if key == 9:  # Tab toggles sync/async
            is_async_mode = not is_async_mode
            log.info("Switched to {} mode".format(
                "async" if is_async_mode else "sync"))

    cap.release()
    cap2.release()
    cv2.destroyAllWindows()
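
A conventional entry point so the demo runs as a script (an addition in the
usual OpenVINO-sample style, not shown in the source):

    if __name__ == '__main__':
        sys.exit(main() or 0)
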
Example #5

import cv2
from openvino.inference_engine import IENetwork, IECore
class PersonDetect:
    '''
    Class for the Person Detection Model.
    '''
    def __init__(self, model_name, device, threshold=0.60):
        self.model_weights = model_name + '.bin'
        self.model_structure = model_name + '.xml'
        self.device = device
        self.threshold = threshold

        try:
            self.model = IENetwork(self.model_structure, self.model_weights)
        except Exception:
            raise ValueError(
                "Could not initialise the network. Have you entered the correct model path?"
            )

        self.input_name = next(iter(self.model.inputs))
        self.input_shape = self.model.inputs[self.input_name].shape
        self.output_name = next(iter(self.model.outputs))
        self.output_shape = self.model.outputs[self.output_name].shape

    def load_model(self):
        self.net = IECore().load_network(network=self.model,
                                         device_name=self.device)

    def predict(self, image):
        processed_image = self.preprocess_inputs(image)
        self.net.start_async(0, inputs={self.input_name: processed_image})
        if self.net.requests[0].wait() != 0:
            return [], image  # inference did not complete
        result = self.net.requests[0].outputs[self.output_name]

        outputs = self.preprocess_outputs(result, image)
        image = self.draw_outputs(outputs, image)

        return outputs, image

    def draw_outputs(self, coords, image):
        frame = image.copy()
        for data in coords:
            cv2.rectangle(frame, data[:2], data[2:], (0, 255, 0), 2)

        return frame

    def preprocess_outputs(self, outputs, image):
        h, w = image.shape[0:2]
        coords = []
        for data in outputs[0][0]:
            # Keep only boxes whose confidence meets the threshold
            if data[2] >= self.threshold:
                xmin = int(data[3] * w)
                ymin = int(data[4] * h)
                xmax = int(data[5] * w)
                ymax = int(data[6] * h)
                coords.append((xmin, ymin, xmax, ymax))

        return coords

    def preprocess_inputs(self, image):
        p_frame = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
        p_frame = p_frame.transpose(2, 0, 1)
        p_frame = p_frame.reshape(1, *p_frame.shape)
        return p_frame
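
A brief usage sketch (hypothetical model path and image; the model is expected
to follow the SSD-style [1, 1, N, 7] detection output layout assumed above):

    pd = PersonDetect('models/person-detection-retail-0013', 'CPU', threshold=0.6)
    pd.load_model()
    frame = cv2.imread('frame.jpg')
    coords, annotated = pd.predict(frame)  # box coords plus the frame with boxes drawn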