import os
import sys
import logging

import cv2
from openvino.inference_engine import IECore


class FaceDetectionModel:
    '''
    Face detection model wrapper: loads the IR model, runs inference
    and post-processes the detections.
    '''
    def __init__(self,
                 model_name,
                 threshold,
                 device='CPU',
                 extensions=None,
                 async_mode=True,
                 plugin=None):
        '''
        Set the instance variables.
        '''
        self.plugin = plugin
        self.network = None
        self.input_blob = None
        self.output_blob = None
        self.out_shape = None
        self.exec_network = None
        self.threshold = threshold
        self.device = device
        self.async_mode = async_mode
        self.infer_request = None
        self.net = None
        self.model_name = model_name
        self.extensions = extensions

    def load_model(self, model_xml=None, cpu_extension=None):
        '''
        Load the IR model onto the target device.
        '''
        self.model_xml = model_xml or self.model_name
        model_bin = os.path.splitext(self.model_xml)[0] + ".bin"
        cpu_extension = cpu_extension or self.extensions

        # Initialize the plugin
        self.plugin = IECore()

        # Add a CPU extension, and any necessary extension
        if cpu_extension and "CPU" in self.device:
            self.plugin.add_extension(cpu_extension, self.device)

        # Read the Intermediate Representation (IR) model as an IENetwork
        # (the IENetwork constructor is deprecated since the 2020 release)
        self.network = self.plugin.read_network(model=self.model_xml,
                                                weights=model_bin)

        ## Check for supported layers
        supported_layers = self.plugin.query_network(network=self.network,
                                                     device_name=self.device)
        ## Check for unsupported layers
        unsupported_layers = [
            l for l in self.network.layers.keys()
            if l not in supported_layers
        ]
        if len(unsupported_layers) != 0:
            print("Unsupported layers found: {}".format(unsupported_layers))
            print("Please check for supported extensions.")
            sys.exit(1)

        # Load the IENetwork into the plugin
        self.exec_network = self.plugin.load_network(self.network, self.device)

        # Get the input and output layers
        self.input_blob = next(iter(self.network.inputs))
        self.output_blob = next(iter(self.network.outputs))
        self.out_shape = self.network.outputs[self.output_blob].shape
        logging.info("Face detection model output shape: %s", self.out_shape)
        return

    def predict(self, image):
        '''
        Run face detection on the input image and return the detected
        bounding boxes together with the annotated frame.
        ref source code: https://github.com/gauravshelangia/computer-pointer-controller/blob/master/src/face_detection.py
        '''
        values = None
        width = image.shape[1]
        height = image.shape[0]
        img_frame = self.preprocess_input(image)
        if self.async_mode:
            self.exec_network.requests[0].async_infer(
                inputs={self.input_blob: img_frame})
        else:
            self.exec_network.requests[0].infer(
                inputs={self.input_blob: img_frame})

        if self.exec_network.requests[0].wait(-1) == 0:
            outputs = self.exec_network.requests[0].outputs[self.output_blob]
            img_frame, values = self.preprocess_output(image, outputs,
                                                       width, height)
            return values, img_frame

    def get_input_shape(self):
        ### Return the shape of the input layer ###
        return self.network.inputs[self.input_blob].shape

    def preprocess_input(self, image):
        '''
        Before feeding the data into the model for inference, the frame has to
        be resized, transposed to CHW and given a batch dimension; this
        function does that.
        '''
        (n, c, h, w) = self.network.inputs[self.input_blob].shape
        img_frame = cv2.resize(image, (w, h))
        img_frame = img_frame.transpose((2, 0, 1))
        img_frame = img_frame.reshape((n, c, h, w))
        return img_frame

    def preprocess_output(self, img_frame, outputs, width, height):
        '''
        Before feeding the output of this model to the next model,
        you might have to preprocess the output. This function is where you can do that.
        '''
        # Output shape: [1, 1, N, 7]
        # Each detection: [image_id, label, conf, xmin, ymin, xmax, ymax]
        values = []
        for box in outputs[0][0]:
            confidence = box[2]
            if confidence > self.threshold:
                if box[3] < 0:
                    box[3] = -box[3]
                if box[4] < 0:
                    box[4] = -box[4]
                xmin = int(box[3] * width) - 10
                ymin = int(box[4] * height) - 10
                xmax = int(box[5] * width) + 10
                ymax = int(box[6] * height) + 10
                # Draw the box on the image and keep only the first face
                cv2.rectangle(img_frame, (xmin, ymin), (xmax, ymax),
                              (0, 0, 255), 2)
                values.append([xmin, ymin, xmax, ymax])
                break
        return img_frame, values

    def clean(self):
        """
        delete all the open instances
        :return: None
        """
        del self.plugin
        del self.network
        del self.exec_network
        del self.net
        del self.device
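
A minimal usage sketch for the class above (not from the original source); the model path, image file and threshold below are placeholder assumptions.

if __name__ == '__main__':
    # Hypothetical paths/values for illustration only
    face_model = FaceDetectionModel(model_name="models/face-detection-adas-0001.xml",
                                    threshold=0.6,
                                    device="CPU",
                                    async_mode=True)
    face_model.load_model()
    frame = cv2.imread("sample.jpg")            # any BGR test image
    boxes, annotated = face_model.predict(frame)
    print("Detected face boxes:", boxes)
    face_model.clean()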
Code example #2
import os
import sys
import logging as log

from openvino.inference_engine import IENetwork, IEPlugin


class Network:
    """
    Load and configure inference plugins for the specified target devices and
    run inference in synchronous or asynchronous mode for the specified infer requests.
    """

    def __init__(self):
        """
        Initialize any class variables desired
        """
        self.network = None
        self.plugin = None
        self.input_blob = None
        self.output_blob = None
        self.net_plugin = None
        self.inf_request = None

    def load_model(self, model, device, num_requests, cpu_extension=None, plugin=None):
        """
        Load the model
        """
        model_xml = model
        model_bin = os.path.splitext(model_xml)[0] + ".bin"

        # Plugin initialization for specified device
        if not plugin:
            log.info("Initializing plugin for {} device...".format(device))
            self.plugin = IEPlugin(device=device)
        else:
            self.plugin = plugin

        # load cpu extension if specified
        if cpu_extension and 'CPU' in device:
            self.plugin.add_cpu_extension(cpu_extension)
        
        self.network = IENetwork(model=model_xml, weights=model_bin)

       
        if self.plugin.device == "CPU":
            # Get the supported layers of the network
            supported_layers = self.plugin.get_supported_layers(self.network)

            unsupported_layers = [layer for layer in self.network.layers.keys() if layer not in supported_layers]
            if len(unsupported_layers) != 0:
                self.plugin.add_extension(cpu_extension, device)

        self.net_plugin = self.plugin.load(self.network, num_requests=num_requests)

        # Get the input layer
        self.input_blob = next(iter(self.network.inputs))
        self.output_blob = next(iter(self.network.outputs))
        
        return self.plugin, self.get_input_shape()

    def get_input_shape(self):
        """
        Return the shape of the input layer
        """
        return self.network.inputs[self.input_blob].shape

    def exec_net(self, request_id, frame):
        """
        Start an asynchronous request
        """

        self.inf_request = self.net_plugin.start_async(
            request_id=request_id, inputs={self.input_blob: frame}
        )

        return self.net_plugin

    def wait(self, request_id):
        """
        Wait for the request to be complete
        """
        status = self.net_plugin.requests[request_id].wait(-1)
        return status

    def get_output(self, request_id, output=None):
        """
        Extract and return the output results
        """
        if output:
            output = self.inf_request.outputs[output]
        else:
            output = self.net_plugin.requests[request_id].outputs[self.output_blob]
        return output

    def clean(self):
        """
        Deletes all the instances
        """
        del self.net_plugin
        del self.plugin
        del self.network
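
A short, hedged usage sketch for this Network wrapper (not part of the original example); the IR path, device and request id are assumptions.

if __name__ == '__main__':
    import numpy as np

    net = Network()
    plugin, input_shape = net.load_model("models/face-detection-adas-0001.xml",
                                         device="CPU", num_requests=1)
    n, c, h, w = input_shape
    dummy = np.zeros((n, c, h, w), dtype=np.float32)  # stand-in for a preprocessed frame
    net.exec_net(request_id=0, frame=dummy)
    if net.wait(request_id=0) == 0:
        result = net.get_output(request_id=0)
        print("Output shape:", result.shape)
    net.clean()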
Code example #3
import os
import sys
import logging as log

from openvino.inference_engine import IENetwork, IEPlugin


class Network:
    """
    Load and configure inference plugins for the specified target devices and
    run inference in synchronous or asynchronous mode for the specified infer requests.
    """
    def __init__(self):
        ### TODO: Initialize any class variables desired ###
        self.net = None
        self.plugin = None
        self.input_blob = None
        self.output_blob = None
        self.net_plugin = None
        self.infer_request_handle = None

    def load_model(self,
                   model,
                   cpu_extension,
                   device,
                   num_requests,
                   plugin=None):
        ### TODO: Load the model ###
        model_xml = model
        model_bin = os.path.splitext(model_xml)[0] + ".bin"

        # Load Network
        self.net = IENetwork(model=model_xml, weights=model_bin)

        ### TODO: Add any necessary extensions ###
        # Initialize the inference plugin for the requested device
        self.plugin = IEPlugin(device=device)

        if cpu_extension and 'CPU' in device:
            self.plugin.add_cpu_extension(cpu_extension)

        ### TODO: Check for supported layers ###
        if "CPU" in device:
            supported_layers = self.plugin.get_supported_layers(self.net)

            not_supported_layers = [
                l for l in self.net.layers.keys() if l not in supported_layers
            ]
            if len(not_supported_layers) != 0:
                log.error("Unsupported layers for device {}: {}".format(
                    device, not_supported_layers))
                sys.exit(1)

        self.input_blob = next(iter(self.net.inputs))
        self.output_blob = next(iter(self.net.outputs))

        if num_requests == 0:
            self.net_plugin = self.plugin.load(network=self.net)
        else:
            self.net_plugin = self.plugin.load(network=self.net,
                                               num_requests=num_requests)

        ### TODO: Return the loaded inference plugin ###
        ### Note: You may need to update the function parameters. ###
        return self.plugin, self.get_input_shape()

    def get_input_shape(self):
        ### TODO: Return the shape of the input layer ###

        return self.net.inputs[self.input_blob].shape

    def exec_net(self, request_id, frame):
        ### TODO: Start an asynchronous request ###
        self.infer_request_handle = self.net_plugin.start_async(
            request_id=request_id, inputs={self.input_blob: frame})
        ### TODO: Return any necessary information ###
        ### Note: You may need to update the function parameters. ###
        return self.net_plugin

    def wait(self, request_id):
        ### TODO: Wait for the request to be complete. ###
        wait_process = self.net_plugin.requests[request_id].wait(-1)
        ### TODO: Return any necessary information ###
        ### Note: You may need to update the function parameters. ###
        return wait_process

    def get_output(self, request_id, output=None):
        ### TODO: Extract and return the output results
        if output:
            res = self.infer_request_handle.outputs[output]
        else:
            res = self.net_plugin.requests[request_id].outputs[
                self.output_blob]
        ### Note: You may need to update the function parameters. ###
        return res
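
A hedged usage sketch for the class above; the file names and the OpenCV preprocessing steps are assumptions for illustration, not part of the original snippet.

if __name__ == '__main__':
    import cv2

    net = Network()
    plugin, (n, c, h, w) = net.load_model(model="models/face-detection-adas-0001.xml",
                                          cpu_extension=None,
                                          device="CPU",
                                          num_requests=1)
    frame = cv2.imread("sample.jpg")
    blob = cv2.resize(frame, (w, h)).transpose((2, 0, 1)).reshape((n, c, h, w))
    net.exec_net(request_id=0, frame=blob)
    if net.wait(request_id=0) == 0:
        detections = net.get_output(request_id=0)  # SSD-style [1, 1, N, 7] output
        print("Raw detections shape:", detections.shape)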