def load_model(self):
        '''
        TODO: You will need to complete this method.
        This method is for loading the model to the device specified by the user.
        If your model requires any Plugins, this is where you can load them.
        '''
        self.plugin = IECore()

        if self.extension and self.device == 'CPU':
            self.plugin.add_extension(self.extension, self.device)

            # Check for supported layers ###
            supported_layers = self.plugin.query_network(
                network=self.model, device_name=self.device)

            unsupported_layers = [
                l for l in self.model.layers.keys()
                if l not in supported_layers
            ]

            if len(unsupported_layers) != 0:
                logger.error(
                    "Unsupported layers found: {}".format(unsupported_layers))
                logger.error(
                    "Check whether extensions are available to add to IECore.")
                exit(1)

        self.net = self.plugin.load_network(network=self.model,
                                            device_name=self.device,
                                            num_requests=1)

        return self.net
Example #2
 def load_model(self):
     core = IECore()
     if self.extensions is not None:
         core.add_extension(self.extensions, self.device)
     self.net = core.load_network(network=self.model,
                                  device_name=self.device,
                                  num_requests=1)
 def __init__(self, model_name, device='CPU', extensions=None):
     """
     Initialize
     :param model_name: model path
     :param device: device to use
     :param extensions: extensions
     """
     # model_weights
     self.model_bin = model_name + ".bin"
     # model_structure
     self.model_xml = model_name + ".xml"
     # device to use
     self.device = device
     self.plugin = IECore()
     self.network = None
     self.net_input = None
     # extensions to use
     self.extensions = extensions
     # Get the input layer
     self.input_blob = None
     self.input_shape = None
     self.output_blob = None
     self.exec_network = None
     self.infer_request = None
     # human-readable name of the model (set by the subclass)
     self.model_name = None
Example #4
 def __init__(self, model_name, device='CPU', extensions=None):
     '''
     TODO: Use this to set your instance variables.
     '''
     self.model_weights = model_name + '.bin'
     self.model_structure = model_name + '.xml'
     self.device = device
     self.extension = extensions
     core = IECore()
     self.model = core.read_network(self.model_structure,
                                    self.model_weights)
     self.input_name = next(iter(self.model.inputs))
     self.input_shape = self.model.inputs[self.input_name].shape
     self.output_name = next(iter(self.model.outputs))
     self.output_shape = self.model.outputs[self.output_name].shape
Example #5
import os
from queue import Queue

import yaml
import cv2
import numpy as np
import pandas as pd
from PIL import Image
from numpy import count_nonzero, vstack, newaxis, argmax, where
from openvino.inference_engine.ie_api import IECore
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm

from dl_src.dnn import get_data_frame_from_folder, DNN

opj = os.path.join
image_extensions = ['.png', '.jpg']
del_label = 'DELETE'
ie = IECore()

# TODO Make size output


def make_list_of_files_by_extension(source,
                                    extensions=None,
                                    escape_copies=True):
    if extensions is None:
        extensions = ('.jpg', '.png')
    ck = set()
    q = Queue()
    q.put(source)
    paths = []
    file_names = []
    while not q.empty():
Example #6
from openvino.inference_engine.ie_api import IECore
import os

xml = '/home/imt/work/pyqt5-images-classifier/dataset/save/FP16/checkpoint-best.xml'
ie = IECore()
n = ie.read_network(model=xml, weights=os.path.splitext(xml)[0] + '.bin')
z = ie.load_network(n, device_name='CPU')
it = next(iter(z.inputs))
print(it, z.inputs[it].shape)
ot = next(iter(z.outputs))
print(ot, z.outputs[ot].shape)
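
# A small follow-up sketch (not part of the original script): run a dummy
# inference on the loaded network to confirm the reported shapes. The zero
# tensor is only a stand-in for a real preprocessed image.
import numpy as np

dummy = np.zeros(z.inputs[it].shape, dtype=np.float32)
result = z.infer({it: dummy})
print(ot, result[ot].shape)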



class face_detection:
    '''
    Class for the Face Detection Model.
    '''
    def __init__(self,
                 model_name,
                 device='CPU',
                 threshold=0.5,
                 extensions=None):
        '''
        TODO: Use this to set your instance variables.
        '''
        self.model_weights = model_name + '.bin'
        self.model_structure = model_name + '.xml'
        self.device = device
        self.threshold = threshold
        self.extension = extensions
        core = IECore()
        self.model = core.read_network(self.model_structure,
                                       self.model_weights)
        self.input_name = next(iter(self.model.inputs))
        self.input_shape = self.model.inputs[self.input_name].shape
        self.output_name = next(iter(self.model.outputs))
        self.output_shape = self.model.outputs[self.output_name].shape
        #raise NotImplementedError

    def load_model(self):
        '''
        TODO: You will need to complete this method.
        This method is for loading the model to the device specified by the user.
        If your model requires any Plugins, this is where you can load them.
        '''
        self.plugin = IECore()

        if self.extension and self.device == 'CPU':
            self.plugin.add_extension(self.extension, self.device)

            # Check for supported layers ###
            supported_layers = self.plugin.query_network(
                network=self.model, device_name=self.device)

            unsupported_layers = [
                l for l in self.model.layers.keys()
                if l not in supported_layers
            ]

            if len(unsupported_layers) != 0:
                logger.error(
                    "Unsupported layers found: {}".format(unsupported_layers))
                logger.error(
                    "Check whether extensions are available to add to IECore.")
                exit(1)

        self.net = self.plugin.load_network(network=self.model,
                                            device_name=self.device,
                                            num_requests=1)

        return self.net
        #raise NotImplementedError

    def predict(self, image):
        '''
        TODO: You will need to complete this method.
        This method is meant for running predictions on the input image.
        '''
        preprocess_image = self.preprocess_input(image)

        results = self.net.infer({self.input_name: preprocess_image})

        coords = self.preprocess_output(results, image.shape[1],
                                        image.shape[0])

        cropped_image, coord = None, None

        if len(coords) > 0:
            coord = coords[0]  # retrieve the first detected face
            cropped_image = image[coord[1]:coord[3], coord[0]:coord[2]]

        return cropped_image, coord
        #raise NotImplementedError

    def check_model(self):
        raise NotImplementedError

    def preprocess_input(self, image):
        '''
        Before feeding the data into the model for inference,
        you might have to preprocess it. This function is where you can do that.
        '''
        image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
        image = image.transpose((2, 0, 1))
        image = image.reshape(1, *image.shape)

        return image
        #raise NotImplementedError

    def preprocess_output(self, outputs, width, height):
        '''
        Before feeding the output of this model to the next model,
        you might have to preprocess the output. This function is where you can do that.
        '''
        coords = []

        output = outputs['detection_out']

        for box in output[0][0]:

            if box[2] > self.threshold:
                #box_lst.append(box)
                xmin = int(box[3] * width)
                ymin = int(box[4] * height)
                xmax = int(box[5] * width)
                ymax = int(box[6] * height)

                coords.append([xmin, ymin, xmax, ymax])

        return coords
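
# Hedged usage sketch for the face_detection class above; the model path,
# the test image and the output file are placeholders.
if __name__ == '__main__':
    import cv2

    fd = face_detection('models/face-detection-adas-0001', device='CPU',
                        threshold=0.5)
    fd.load_model()
    frame = cv2.imread('sample.jpg')
    face_crop, face_box = fd.predict(frame)
    if face_crop is not None:
        # Draw the detected face box on the original frame
        cv2.rectangle(frame, (face_box[0], face_box[1]),
                      (face_box[2], face_box[3]), (0, 255, 0), 2)
        cv2.imwrite('sample_with_face.jpg', frame)
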
Example #8
class facial_landmarks_detection:
    '''
    Class for the Facial Landmarks Detection Model.
    '''
    def __init__(self, model_name, device='CPU', extensions=None):
        '''
        TODO: Use this to set your instance variables.
        '''
        self.model_weights = model_name + '.bin'
        self.model_structure = model_name + '.xml'
        self.device = device
        self.extension = extensions
        core = IECore()
        self.model = core.read_network(self.model_structure,
                                       self.model_weights)
        self.input_name = next(iter(self.model.inputs))
        self.input_shape = self.model.inputs[self.input_name].shape
        self.output_name = next(iter(self.model.outputs))
        self.output_shape = self.model.outputs[self.output_name].shape
        #raise NotImplementedError

    def load_model(self):
        '''
        TODO: You will need to complete this method.
        This method is for loading the model to the device specified by the user.
        If your model requires any Plugins, this is where you can load them.
        '''
        self.plugin = IECore()

        if self.extension and self.device == 'CPU':
            self.plugin.add_extension(self.extension, self.device)

            # Check for supported layers ###
            supported_layers = self.plugin.query_network(
                network=self.model, device_name=self.device)

            unsupported_layers = [
                l for l in self.model.layers.keys()
                if l not in supported_layers
            ]

            if len(unsupported_layers) != 0:
                logger.error(
                    "Unsupported layers found: {}".format(unsupported_layers))
                logger.error(
                    "Check whether extensions are available to add to IECore.")
                exit(1)

        self.net = self.plugin.load_network(network=self.model,
                                            device_name=self.device,
                                            num_requests=1)

        return self.net
        #raise NotImplementedError

    def predict(self, image):
        '''
        TODO: You will need to complete this method.
        This method is meant for running predictions on the input image.
        '''
        preprocess_image = self.preprocess_input(image)

        results = self.net.infer({self.input_name: preprocess_image})

        eye_coords = self.preprocess_output(results, image.shape[1],
                                            image.shape[0])

        lefteye_x_min = eye_coords['left_eye_x_coord'] - 10
        lefteye_x_max = eye_coords['left_eye_x_coord'] + 10
        lefteye_y_min = eye_coords['left_eye_y_coord'] - 10
        lefteye_y_max = eye_coords['left_eye_y_coord'] + 10

        righteye_x_min = eye_coords['right_eye_x_coord'] - 10
        righteye_x_max = eye_coords['right_eye_x_coord'] + 10
        righteye_y_min = eye_coords['right_eye_y_coord'] - 10
        righteye_y_max = eye_coords['right_eye_y_coord'] + 10

        eye_coord = [[
            lefteye_x_min, lefteye_y_min, lefteye_x_max, lefteye_y_max
        ], [righteye_x_min, righteye_y_min, righteye_x_max, righteye_y_max]]

        # numpy images are indexed [row (y), col (x)]
        left_eye_image = image[lefteye_y_min:lefteye_y_max,
                               lefteye_x_min:lefteye_x_max]
        right_eye_image = image[righteye_y_min:righteye_y_max,
                                righteye_x_min:righteye_x_max]

        return left_eye_image, right_eye_image, eye_coord
        #raise NotImplementedError

    def check_model(self):
        raise NotImplementedError

    def preprocess_input(self, image):
        '''
        Before feeding the data into the model for inference,
        you might have to preprocess it. This function is where you can do that.
        '''
        image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
        image = image.transpose(2, 0, 1)
        image = image.reshape(1, *image.shape)
        return image
        #raise NotImplementedError

    def preprocess_output(self, outputs, width, height):
        '''
        Before feeding the output of this model to the next model,
        you might have to preprocess the output. This function is where you can do that.
        '''
        eye_coords = {}

        outputs = outputs[self.output_name][0]

        eye_coords['left_eye_x_coord'] = int(outputs[0] * width)
        eye_coords['left_eye_y_coord'] = int(outputs[1] * height)
        eye_coords['right_eye_x_coord'] = int(outputs[2] * width)
        eye_coords['right_eye_y_coord'] = int(outputs[3] * height)

        return eye_coords
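
# Hedged helper sketch for the landmarks class above: it expects an already
# cropped face image (for example the crop returned by a face detection
# model); the helper name and default model path are placeholders.
def run_landmarks_demo(face_crop,
                       model_path='models/landmarks-regression-retail-0009'):
    fl = facial_landmarks_detection(model_path)
    fl.load_model()
    # Returns (left_eye_image, right_eye_image, eye_coord)
    return fl.predict(face_crop)
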
class head_pose_estimation:
    '''
    Class for the Head Pose Estimation Model.
    '''
    def __init__(self, model_name, device='CPU', extensions=None):
        '''
        TODO: Use this to set your instance variables.
        '''
        self.model_weights = model_name + '.bin'
        self.model_structure = model_name + '.xml'
        self.device = device
        self.extension = extensions
        core = IECore()
        self.model = core.read_network(self.model_structure,
                                       self.model_weights)
        self.input_name = next(iter(self.model.inputs))
        self.input_shape = self.model.inputs[self.input_name].shape
        self.output_name = next(iter(self.model.outputs))
        self.output_shape = self.model.outputs[self.output_name].shape
        #raise NotImplementedError

    def load_model(self):
        '''
        TODO: You will need to complete this method.
        This method is for loading the model to the device specified by the user.
        If your model requires any Plugins, this is where you can load them.
        '''
        self.plugin = IECore()

        if self.extension and self.device == 'CPU':
            self.plugin.add_extension(self.extension, self.device)

            # Check for supported layers ###
            supported_layers = self.plugin.query_network(
                network=self.model, device_name=self.device)

            unsupported_layers = [
                l for l in self.model.layers.keys()
                if l not in supported_layers
            ]

            if len(unsupported_layers) != 0:
                logger.error(
                    "Unsupported layers found: {}".format(unsupported_layers))
                logger.error(
                    "Check whether extensions are available to add to IECore.")
                exit(1)

        self.net = self.plugin.load_network(network=self.model,
                                            device_name=self.device,
                                            num_requests=1)

        return self.net
        #raise NotImplementedError

    def predict(self, image):
        '''
        TODO: You will need to complete this method.
        This method is meant for running predictions on the input image.
        '''
        preprocess_image = self.preprocess_input(image)

        results = self.net.infer({self.input_name: preprocess_image})

        output_lst = self.preprocess_output(results)

        return output_lst
        #raise NotImplementedError

    def check_model(self):
        raise NotImplementedError

    def preprocess_input(self, image):
        '''
        Before feeding the data into the model for inference,
        you might have to preprocess it. This function is where you can do that.
        '''
        image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
        image = image.transpose((2, 0, 1))
        image = image.reshape(1, *image.shape)

        return image
        #raise NotImplementedError

    def preprocess_output(self, outputs):
        '''
        Before feeding the output of this model to the next model,
        you might have to preprocess the output. This function is where you can do that.
        '''
        output_lst = []
        # Output order expected downstream: [yaw, pitch, roll] in degrees
        output_lst.append(outputs['angle_y_fc'].tolist()[0][0])  # yaw
        output_lst.append(outputs['angle_p_fc'].tolist()[0][0])  # pitch
        output_lst.append(outputs['angle_r_fc'].tolist()[0][0])  # roll

        return output_lst
class BaseModel:
    def __init__(self, model_name, device='CPU', extensions=None):
        """
        Initialize
        :param model_name: model path
        :param device: device to use
        :param extensions: extensions
        """
        # model_weights
        self.model_bin = model_name + ".bin"
        # model_structure
        self.model_xml = model_name + ".xml"
        # device to use
        self.device = device
        self.plugin = IECore()
        self.network = None
        self.net_input = None
        # extensions to use
        self.extensions = extensions
        # Get the input layer
        self.input_blob = None
        self.input_shape = None
        self.output_blob = None
        self.exec_network = None
        self.infer_request = None
        # human-readable name of the model (set by the subclass)
        self.model_name = None

    def load_model(self):
        """
        To load the model to the specified hardware
        :return:None
        """
        try:
            log.info("Loading {} IR to the plugin.".format(self.model_name))
            self.network = IENetwork(model=self.model_xml,
                                     weights=self.model_bin)
            self.input_blob = next(iter(self.network.inputs))
            self.input_shape = self.network.inputs[self.input_blob].shape
            self.output_blob = next(iter(self.network.outputs))
            # check model for unsupported layers
            self.check_model()
            # Load the network into the Inference Engine
            self.exec_network = self.plugin.load_network(
                self.network, self.device)
        except Exception as e:
            log.error("The loading of the {} model cannot be completed!".format(
                self.model_name))
            log.error("Exception message during {} load : {}".format(
                self.model_name, e))

    def check_model(self):
        """
        To check the model for unsupported layers and apply necessary extensions
        :return:None
        """
        # Check for supported layers
        supported_layers = self.plugin.query_network(network=self.network,
                                                     device_name=self.device)
        unsupported_layers = [
            l for l in self.network.layers.keys() if l not in supported_layers
        ]
        if len(unsupported_layers) != 0:
            log.debug("Unsupported network layers found!")
            # Add the extension, if one was provided, to cover the
            # unsupported layers
            if self.extensions:
                self.plugin.add_extension(self.extensions, self.device)

    def preprocess_input(self, image):
        """
        To preprocess the input for the model.
        :param image: input frame
        :return: transformed input frame
        """
        # Pre-process the frame
        if self.model_name == "Gaze estimation Model":
            image = cv2.resize(image, (60, 60))
        else:
            image = cv2.resize(image,
                               (self.input_shape[3], self.input_shape[2]))
        # Change format from HWC to CHW
        image_to_infer = image.transpose((2, 0, 1))
        # Add the batch dimension expected by the network (NCHW)
        image_to_infer = image_to_infer.reshape(1, *image_to_infer.shape)
        return image_to_infer

    def predict(self):
        """
        Perform the inference request.
        :return:None
        """
        # make an asynchronous inference request
        self.infer_request = self.exec_network.start_async(
            0, inputs=self.net_input)

    def wait(self):
        """
        Wait for the request to be complete.
        :return:status of the inference request
        """
        status = self.exec_network.requests[0].wait(-1)
        return status

    def get_network_input_shape(self):
        """
        To get the network input keys and corresponding shape
        :return:None
        """
        input_name = [i for i in self.network.inputs.keys()]
        log.debug("The network {} input name : {}".format(
            self.model_name, input_name))
        for name in input_name:
            input_shape = self.network.inputs[name].shape
            log.debug("The input shape for {} is {}".format(name, input_shape))

    def get_network_output_shape(self):
        """
        To get the network output keys
        :return:None
        """
        output_name = [i for i in self.network.outputs.keys()]
        log.debug("The network {} output name : {}".format(
            self.model_name, output_name))

    @abstractmethod
    def set_net_input(self):
        """
        Prepare the network input according to the model specification
        :return: None
        """
        pass
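
# Hedged sketch of a concrete subclass of BaseModel, showing one way the
# abstract set_net_input() could be filled in; the model name string and the
# frame argument are assumptions, not part of the original code.
class FaceDetectionModel(BaseModel):
    def __init__(self, model_name, device='CPU', extensions=None):
        super().__init__(model_name, device, extensions)
        self.model_name = "Face detection Model"

    def set_net_input(self, frame=None):
        # Build the input dict consumed by exec_network.start_async()
        self.net_input = {self.input_blob: self.preprocess_input(frame)}
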
Example #11
class gaze_estimation:
    '''
    Class for the Gaze Estimation Model.
    '''
    def __init__(self, model_name, device='CPU', extensions=None):
        '''
        TODO: Use this to set your instance variables.
        '''
        self.model_weights = model_name + '.bin'
        self.model_structure = model_name + '.xml'
        self.device = device
        self.extension = extensions
        core = IECore()
        self.model = core.read_network(self.model_structure,
                                       self.model_weights)
        self.input_name = next(iter(self.model.inputs))
        self.input_shape = self.model.inputs[self.input_name].shape
        self.output_name = next(iter(self.model.outputs))
        self.output_shape = self.model.outputs[self.output_name].shape

        #raise NotImplementedError

    def load_model(self):
        '''
        TODO: You will need to complete this method.
        This method is for loading the model to the device specified by the user.
        If your model requires any Plugins, this is where you can load them.
        '''
        self.plugin = IECore()

        if self.extension and self.device == 'CPU':
            self.plugin.add_extension(self.extension, self.device)

            # Check for supported layers ###
            supported_layers = self.plugin.query_network(
                network=self.model, device_name=self.device)

            unsupported_layers = [
                l for l in self.model.layers.keys()
                if l not in supported_layers
            ]

            if len(unsupported_layers) != 0:
                logger.error(
                    "Unsupported layers found: {}".format(unsupported_layers))
                logger.error(
                    "Check whether extensions are available to add to IECore.")
                exit(1)

        self.net = self.plugin.load_network(network=self.model,
                                            device_name=self.device,
                                            num_requests=1)

        return self.net
        #raise NotImplementedError

    def predict(self, left_eye_image, right_eye_image, head_pose_output):
        '''
        TODO: You will need to complete this method.
        This method is meant for running predictions on the input image.
        '''
        left_eye_image_preprocess, right_eye_image_preprocess = self.preprocess_input(
            left_eye_image, right_eye_image)

        self.results = self.net.infer(
            inputs={
                'left_eye_image': left_eye_image_preprocess,
                'right_eye_image': right_eye_image_preprocess,
                'head_pose_angles': head_pose_output
            })

        mouse_coord, gaze_vector = self.preprocess_output(
            self.results, head_pose_output)

        return mouse_coord, gaze_vector
        #raise NotImplementedError

    def check_model(self):
        raise NotImplementedError

    def preprocess_input(self, left_eye_image, right_eye_image):
        '''
        Before feeding the data into the model for inference,
        you might have to preprocess it. This function is where you can do that.
        '''
        left_eye_image_preprocess = cv2.resize(left_eye_image, (60, 60))
        left_eye_image_preprocess = left_eye_image_preprocess.transpose(
            (2, 0, 1))
        left_eye_image_preprocess = left_eye_image_preprocess.reshape(
            1, *left_eye_image_preprocess.shape)

        right_eye_image_preprocess = cv2.resize(right_eye_image, (60, 60))
        right_eye_image_preprocess = right_eye_image_preprocess.transpose(
            (2, 0, 1))
        right_eye_image_preprocess = right_eye_image_preprocess.reshape(
            1, *right_eye_image_preprocess.shape)

        return left_eye_image_preprocess, right_eye_image_preprocess
        #raise NotImplementedError

    def preprocess_output(self, outputs, head_pose_estimation_output):
        '''
        Before feeding the output of this model to the next model,
        you might have to preprocess the output. This function is where you can do that.
        '''
        roll_value = head_pose_estimation_output[2]  # roll angle in degrees
        outputs = outputs[self.output_name][0]  # raw gaze vector (x, y, z)

        cos_theta = math.cos(roll_value * math.pi / 180)
        sin_theta = math.sin(roll_value * math.pi / 180)

        # Rotate the gaze vector by the head roll so the resulting mouse
        # movement stays aligned with the screen axes
        x_value = outputs[0] * cos_theta + outputs[1] * sin_theta
        y_value = outputs[1] * cos_theta - outputs[0] * sin_theta

        return (x_value, y_value), outputs
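
# Hedged end-to-end sketch combining the example classes shown on this page
# (face_detection, facial_landmarks_detection, head_pose_estimation and the
# gaze_estimation class above), assuming they are available in one module.
# Model paths and the input image are placeholders.
if __name__ == '__main__':
    import cv2

    fd = face_detection('models/face-detection-adas-0001')
    fl = facial_landmarks_detection('models/landmarks-regression-retail-0009')
    hp = head_pose_estimation('models/head-pose-estimation-adas-0001')
    ge = gaze_estimation('models/gaze-estimation-adas-0002')
    for model in (fd, fl, hp, ge):
        model.load_model()

    frame = cv2.imread('sample.jpg')
    face_crop, face_box = fd.predict(frame)
    if face_crop is not None:
        left_eye, right_eye, _ = fl.predict(face_crop)
        head_pose = hp.predict(face_crop)  # [yaw, pitch, roll] in degrees
        mouse_xy, gaze_vector = ge.predict(left_eye, right_eye, head_pose)
        print('mouse offset:', mouse_xy, 'gaze vector:', gaze_vector)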