class Network:
    """
    Load and configure inference plugins for the specified target devices
    and performs synchronous and asynchronous modes for the specified infer requests.
    """
    def __init__(self):
        """
        Initialize class variables that hold plugin/network state.
        """
        self.net = None                   # parsed IR network (IENetwork)
        self.plugin = None                # device plugin (IEPlugin or IECore)
        self.input_blob = None            # name of the first input layer
        self.out_blob = None              # name of the first output layer
        self.net_plugin = None            # ExecutableNetwork after load
        self.infer_request_handle = None  # handle of the last async request

    def load_model(self,
                   model,
                   device,
                   input_size,
                   output_size,
                   num_requests,
                   cpu_extension=None,
                   plugin=None):
        """
         Loads a network and an image to the Inference Engine plugin.
        :param model: .xml file of pre trained model
        :param cpu_extension: extension for the CPU device
        :param device: Target device
        :param input_size: Number of input layers
        :param output_size: Number of output layers
        :param num_requests: Index of Infer request value. Limited to device capabilities.
        :param plugin: Plugin for specified device
        :return:  Shape of input layer
        """

        model_xml = model
        # IR weights file sits next to the .xml with the same basename.
        model_bin = os.path.splitext(model_xml)[0] + ".bin"

        # Plugin initialization for specified device; reuse a caller-supplied
        # plugin when one is given, and load extensions library if specified.
        if not plugin:
            log.info("Initializing plugin for {} device...".format(device))
            self.plugin = IEPlugin(device=device)
        else:
            self.plugin = plugin

        if cpu_extension and 'CPU' in device:
            self.plugin.add_cpu_extension(cpu_extension)

        # Read IR
        log.info("Reading IR...")
        self.net = IENetwork(model=model_xml, weights=model_bin)
        log.info("Loading IR to the plugin...")

        # On CPU, verify every network layer is supported; abort with a hint
        # about -l/--cpu_extension otherwise.
        if self.plugin.device == "CPU":
            supported_layers = self.plugin.get_supported_layers(self.net)
            not_supported_layers = \
                [l for l in self.net.layers.keys() if l not in supported_layers]
            if len(not_supported_layers) != 0:
                log.error("Following layers are not supported by "
                          "the plugin for specified device {}:\n {}".format(
                              self.plugin.device,
                              ', '.join(not_supported_layers)))
                log.error("Please try to specify cpu extensions library path"
                          " in command line parameters using -l "
                          "or --cpu_extension command line argument")
                sys.exit(1)

        # num_requests == 0 lets the plugin pick its default request count.
        if num_requests == 0:
            # Loads network read from IR to the plugin
            self.net_plugin = self.plugin.load(network=self.net)
        else:
            self.net_plugin = self.plugin.load(network=self.net,
                                               num_requests=num_requests)

        self.input_blob = next(iter(self.net.inputs))
        self.out_blob = next(iter(self.net.outputs))
        assert len(self.net.inputs.keys()) == input_size, \
            "Supports only {} input topologies".format(len(self.net.inputs))
        assert len(self.net.outputs) == output_size, \
            "Supports only {} output topologies".format(len(self.net.outputs))

        return self.plugin, self.get_input_shape()

    def load_model_2(self,
                     model,
                     device,
                     input_size,
                     output_size,
                     num_requests,
                     cpu_extension=None,
                     plugin=None):
        """
        Load the model via the IECore API.

        Checks for supported layers, adds any necessary extensions,
        and returns the loaded inference plugin plus the input shape.

        :param model: .xml file of pre trained model
        :param device: Target device name, e.g. "CPU"
        :param input_size: Expected number of input layers
        :param output_size: Expected number of output layers
        :param num_requests: Number of infer requests (0 = plugin default)
        :param cpu_extension: extension for the CPU device
        :param plugin: Pre-initialized IECore to reuse
        :return: (plugin, shape of input layer)
        """

        model_xml = model
        model_bin = os.path.splitext(model_xml)[0] + ".bin"

        # Plugin initialization for specified device
        # and load extensions library if specified

        if not plugin:
            log.info("Initializing plugin for {} device...".format(device))
            self.plugin = IECore()
        else:
            self.plugin = plugin

        # IECore registers CPU extensions via add_extension(path, device).
        if cpu_extension and 'CPU' in device:
            self.plugin.add_extension(cpu_extension, "CPU")

        # Read IR
        log.info("Reading IR...")

        # BUGFIX: the original called IECore.load_network(model=..., weights=...),
        # but load_network is an instance method that takes network/device_name.
        # Parse the IR with IENetwork (same as load_model above) instead.
        self.net = IENetwork(model=model_xml, weights=model_bin)
        log.info("Loading IR to the plugin...")

        # On CPU, verify every network layer is supported; abort with a hint
        # about -l/--cpu_extension otherwise.
        if "CPU" in device:
            supported_layers = self.plugin.query_network(self.net, "CPU")
            not_supported_layers = \
                [l for l in self.net.layers.keys() if l not in supported_layers]
            if len(not_supported_layers) != 0:
                log.error("Following layers are not supported by "
                          "the plugin for specified device {}:\n {}".format(
                              device, ', '.join(not_supported_layers)))
                log.error("Please try to specify cpu extensions library path"
                          " in command line parameters using -l "
                          "or --cpu_extension command line argument")
                sys.exit(1)

        # num_requests == 0 lets the plugin pick its default request count.
        if num_requests == 0:
            # Loads network read from IR to the plugin
            self.net_plugin = self.plugin.load_network(network=self.net,
                                                       device_name=device)
        else:
            self.net_plugin = self.plugin.load_network(
                network=self.net,
                num_requests=num_requests,
                device_name=device)

        self.input_blob = next(iter(self.net.inputs))
        self.out_blob = next(iter(self.net.outputs))
        assert len(self.net.inputs.keys()) == input_size, \
            "Supports only {} input topologies".format(len(self.net.inputs))
        assert len(self.net.outputs) == output_size, \
            "Supports only {} output topologies".format(len(self.net.outputs))

        return self.plugin, self.get_input_shape()

    def get_input_shape(self):
        """
        Return the shape of the input layer named by self.input_blob.
        """
        return self.net.inputs[self.input_blob].shape

    def exec_net(self, request_id, frame):
        """
        Start an asynchronous inference request for `frame`.

        :param request_id: index of the infer request slot to use
        :param frame: input data fed to the network's input blob
        :return: the executable network (for chaining / status queries)
        """
        self.infer_request_handle = self.net_plugin.start_async(
            request_id=request_id, inputs={self.input_blob: frame})
        return self.net_plugin

    def wait(self, request_id):
        """
        Block until the given infer request completes.

        :param request_id: index of the infer request to wait on
        :return: status code from the request's wait(-1) call
        """
        request_waiting = self.net_plugin.requests[request_id].wait(-1)
        return request_waiting

    def get_output(self, request_id, output=None):
        """
        Extract and return the output results.

        :param request_id: index of the finished infer request
        :param output: optional explicit output blob name; when omitted,
                       the first output layer (self.out_blob) is used
        """
        if output:
            return self.infer_request_handle.outputs[output]
        else:
            return self.net_plugin.requests[request_id].outputs[self.out_blob]
# Example 2
class Network:
    """
    Load and configure inference plugins for the specified target devices 
    and performs synchronous and asynchronous modes for the specified infer requests.
    """
    def __init__(self):
        # Class state populated by load_model().
        self.net = None            # parsed IR network (IENetwork)
        self.plugin = None         # IECore (or caller-supplied core)
        self.input_blob = None     # name of the first input layer
        self.output_blob = None    # name of the first output layer
        self.net_plugin = None     # ExecutableNetwork after load
        self.infer_request = None  # handle of the last async request

    def load_model(self,
                   model,
                   device,
                   num_requests,
                   cpu_extension=None,
                   plugin=None,
                   input_size=1,
                   output_size=1):
        """
        Load the IR model onto the target device and return the plugin.

        :param model: path to the model's .xml file (.bin is inferred)
        :param device: target device name, e.g. "CPU"
        :param num_requests: number of infer requests (0 = plugin default)
        :param cpu_extension: optional CPU extension library path
        :param plugin: optional pre-initialized IECore to reuse
        :param input_size: expected number of input layers (default 1)
        :param output_size: expected number of output layers (default 1)
        :return: (plugin, shape of the first input layer)
        """
        load_model_xml = model
        load_model_bin = os.path.splitext(load_model_xml)[0] + ".bin"

        # BUGFIX: the original used `while not plugin:`, which looped forever
        # when no plugin was supplied. An `if` is intended here.
        if not plugin:
            log.info("Please wait. Starting plugin for {} device... ".format(
                device))
            self.plugin = IECore()
        else:
            self.plugin = plugin

        # BUGFIX: bare `CPU` was a NameError, and IECore has no
        # add_cpu_extension; register extensions via add_extension().
        if cpu_extension and 'CPU' in device:
            self.plugin.add_extension(cpu_extension, "CPU")

        log.info('Reading IR, Please wait.')
        self.net = IENetwork(model=load_model_xml, weights=load_model_bin)
        log.info(
            'Completed. Loading IR to the plugin. This may take some time')

        # Check for supported layers on CPU. BUGFIX: IECore has no `.device`
        # attribute; test the requested device string and use query_network.
        if 'CPU' in device:
            supported_layers = self.plugin.query_network(self.net, "CPU")
            unsupported_layers = [
                l for l in self.net.layers.keys() if l not in supported_layers
            ]

            if len(unsupported_layers) != 0:
                # BUGFIX: format string was missing its {} placeholder.
                log.error(
                    'There are a number of unsupported layers found: {}'.format(
                        unsupported_layers))
                sys.exit(1)

        # BUGFIX: `num_request` was undefined and IECore loads networks via
        # load_network(network=..., device_name=...), not plugin.load().
        if num_requests == 0:
            self.net_plugin = self.plugin.load_network(network=self.net,
                                                       device_name=device)
        else:
            self.net_plugin = self.plugin.load_network(
                network=self.net,
                num_requests=num_requests,
                device_name=device)

        # BUGFIX: the attributes are `inputs`/`outputs` (plural).
        self.input_blob = next(iter(self.net.inputs))
        self.output_blob = next(iter(self.net.outputs))

        # BUGFIX: the original compared with `==`, which rejected exactly the
        # valid topology; also `.key()` is not a dict method.
        if len(self.net.inputs.keys()) != input_size:
            log.error(
                'Sorry, this app supports {} input topologies. Please make the necessary changes and try again'
                .format(len(self.net.inputs)))
            sys.exit(1)

        if len(self.net.outputs) != output_size:
            log.error(
                'Sorry, this app supports {} output topologies. Please make the necessary changes and try again'
                .format(len(self.net.outputs)))
            sys.exit(1)

        # BUGFIX: the original returned the bound method object instead of
        # calling it, and had unreachable code after the return.
        return self.plugin, self.get_input_shape()

    def get_input_shape(self):
        """Return the shape of the network's first input layer."""
        # BUGFIX: `self.input.blob` was an AttributeError; the attribute
        # set in load_model() is `self.input_blob`.
        return self.net.inputs[self.input_blob].shape

    def exec_net(self, request_id, frame):
        """
        Start an asynchronous inference request for `frame`.

        :param request_id: index of the infer request slot to use
        :param frame: input data fed to the network's input blob
        :return: the executable network (for chaining / status queries)
        """
        self.infer_request = self.net_plugin.start_async(
            request_id=request_id, inputs={self.input_blob: frame})

        return self.net_plugin

    def wait(self, request_id):
        """
        Block until the given infer request completes.

        :param request_id: index of the infer request to wait on
        :return: status code from the request's wait(-1) call
        """
        wait_status = self.net_plugin.requests[request_id].wait(-1)

        return wait_status

    def get_output(self, request_id, output=None):
        """
        Extract and return the output results.

        BUGFIX: the original signature took no parameters, so `output` and
        `request_id` were NameErrors on every call; parameters added to
        match the companion implementation's interface.

        :param request_id: index of the finished infer request
        :param output: optional explicit output blob name; when omitted,
                       the first output layer (self.output_blob) is used
        """
        if output:
            result = self.infer_request.outputs[output]
        else:
            result = self.net_plugin.requests[request_id].outputs[
                self.output_blob]

        return result

    def delete_instances(self):
        """Release references to the loaded network and plugin."""
        del self.net_plugin
        # BUGFIX: `del self_plugin` was a NameError; the attribute is
        # `self.plugin`.
        del self.plugin
        del self.net