Example #1
    def benchmark_callback(self, network_inputs_data):
        latencies = list()

        if self._network:
            ie_network = self._network.ie_network
        else:
            ie_network = ie.IENetwork(self._configuration.model,
                                      self._configuration.weights)
        plugin = ie.IEPlugin(self._configuration.device)
        if self._configuration.cpu_extension:
            plugin.add_cpu_extension(self._configuration.cpu_extension)
        exec_network = plugin.load(ie_network)

        # warming up
        exec_network.infer(network_inputs_data)

        for i in range(self._iterations_count):
            start = datetime.datetime.now()
            exec_network.infer(network_inputs_data)
            # total_seconds() keeps whole seconds; .microseconds would truncate runs over 1 s
            latencies.append((datetime.datetime.now() - start).total_seconds())
        self._latency = numpy.mean(latencies)

        del ie_network
        del exec_network
        del plugin
Example #2
    def _create_multi_device_plugin(self, log=True):
        async_mode = self.get_value_from_config('async_mode')
        if not async_mode:
            warning('Using multi device in sync mode is not applicable. Async mode will be used.')
        self.async_mode = True
        num_per_device_req = re.findall(NIREQ_REGEX, self._device)
        device_list = self._devices_list()
        num_devices = len(device_list)
        if num_per_device_req:
            brackets = r"(\()|(\))"
            num_per_device_requests = [int(re.sub(brackets, '', nreq)) for nreq in num_per_device_req]
            if 'num_requests' in self.config:
                warning(
                    "number requests already provided in device name specification. "
                    "'num_requests' option will be ignored."
                )
        else:
            num_per_device_requests = get_or_parse_value(self.config.get('num_requests', 1), casting_type=int)
        if len(num_per_device_requests) == 1:
            num_per_device_requests = [num_per_device_requests[0]] * len(device_list)

        if num_devices != len(num_per_device_requests):
            raise ConfigError('num_requests should be specified for all {} devices'.format(num_devices))
        device_strings = [
            '{device}({nreq})'.format(device=device, nreq=nreq)
            for device, nreq in zip(device_list, num_per_device_requests)
        ]
        self.plugin = ie.IEPlugin('MULTI:{}'.format(','.join(device_strings)))
        self._num_requests = sum(num_per_device_requests) * 2
        if log:
            print_info('Loaded {} plugin version: {}'.format(self.plugin.device, self.plugin.version))
            print_info('Request number for each device:')
            for device, nreq in zip(device_list, num_per_device_requests):
                print_info('    {} - {}'.format(device, nreq))
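For illustration only (assuming the helpers above behave as the code suggests): with self._device = 'MULTI:CPU(4),GPU(2)', _devices_list() returns ['CPU', 'GPU'], the per-device request counts parse to [4, 2], the plugin is created as ie.IEPlugin('MULTI:CPU(4),GPU(2)'), and self._num_requests becomes (4 + 2) * 2 = 12.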
Example #3
    def create_ie_plugin(self, log=True):
        if hasattr(self, 'plugin'):
            del self.plugin
        if log:
            print_info('IE version: {}'.format(ie.get_version()))
        if self._is_multi():
            self._create_multi_device_plugin(log)
        else:
            self.plugin = ie.IEPlugin(self._device)
            self.async_mode = self.get_value_from_config('async_mode')
            num_requests = get_or_parse_value(self.config.get('num_requests', 1), casting_type=int)
            if len(num_requests) != 1:
                raise ConfigError('Several values for _num_requests specified')
            self._num_requests = num_requests[0]
            if self._num_requests != 1 and not self.async_mode:
                warning('{} infer requests in sync mode are not supported. '
                        'Only 1 infer request will be used.'.format(self._num_requests))
                self._num_requests = 1
            if log:
                print_info('Loaded {} plugin version: {}'.format(self.plugin.device, self.plugin.version))

        cpu_extensions = self.config.get('cpu_extensions')
        if cpu_extensions and 'CPU' in self._devices_list():
            selection_mode = self.config.get('_cpu_extensions_mode')
            cpu_extensions = DLSDKLauncher.get_cpu_extension(cpu_extensions, selection_mode)
            self.plugin.add_cpu_extension(str(cpu_extensions))
        gpu_extensions = self.config.get('gpu_extensions')
        if gpu_extensions and 'GPU' in self._devices_list():
            self.plugin.set_config('CONFIG_FILE', str(gpu_extensions))
        if self._is_vpu():
            log_level = self.config.get('_vpu_log_level')
            if log_level:
                self.plugin.set_config({'VPU_LOG_LEVEL': log_level})
Example #4
def openvino_random_input(xml, weights, scale=255.):
    net = ie.IENetwork(model=xml, weights=weights)
    assert len(net.inputs) == 1

    i0 = next(iter(net.inputs))
    plugin = ie.IEPlugin(device="CPU")
    exec_net = plugin.load(network=net)
    input_shape = exec_net.requests[0].inputs[i0].shape
    input_array = np.random.random(input_shape).astype(np.float32) * scale

    return input_array
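A minimal usage sketch for the helper above (not part of the original example; the model paths are placeholders and the import alias for the legacy openvino.inference_engine module is an assumption):

import openvino.inference_engine as ie  # assumed alias used throughout these examples

xml, weights = 'model.xml', 'model.bin'  # placeholder IR paths
blob = openvino_random_input(xml, weights)

# Build an executable network and feed the random blob through it
net = ie.IENetwork(model=xml, weights=weights)
plugin = ie.IEPlugin(device='CPU')
exec_net = plugin.load(network=net)
input_name = next(iter(net.inputs))
result = exec_net.infer({input_name: blob})  # dict of output arrays keyed by layer name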
Example #5
    def __init__(self, configuration: CalibratorConfiguration):
        self._configuration = configuration

        network = self.create_network()
        self._input_layer_name = next(iter(network.inputs))
        self._output_layer_name = next(iter(network.outputs))

        self.plugin = ie.IEPlugin(self._configuration.device)
        if self._configuration.cpu_extension and self._configuration.device == 'CPU':
            self.plugin.add_cpu_extension(self._configuration.cpu_extension)
        if self._configuration.gpu_extension and self._configuration.device == 'GPU':
            self.plugin.set_config('CONFIG_FILE', self._configuration.gpu_extension)
Example #6
def main():
    #######################  Device  Initialization  ########################
    #  Plugin initialization for specified device and load extensions library if specified
    plugin = ie.IEPlugin(device="MYRIAD")
    #########################################################################

    #########################  Load Neural Network  #########################
    #  Read in Graph file (IR)
    net = ie.IENetwork(model="cnn-mnist_inference.xml",
                       weights="cnn-mnist_inference.bin")

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    #  Load network to the plugin
    exec_net = plugin.load(network=net)
    del net
    ########################################################################

    #########################  Obtain Input Tensor  ########################
    #  Obtain and preprocess input tensor (image)
    #  Read and pre-process the input image (these details may not need to be shown)
    image_for_inference = cv2.imread("./1.JPG")

    image_for_inference = cv2.cvtColor(image_for_inference, cv2.COLOR_BGR2GRAY)
    image_for_inference = cv2.resize(image_for_inference, (28, 28))

    image_for_inference = image_for_inference.astype(numpy.float32)

    image_for_inference[:] = 1 - ((image_for_inference[:]) * (1.0 / 255.0))

    image_for_inference = image_for_inference.reshape(-1, 28, 28)
    # ########################################################################

    # ##########################  Start  Inference  ##########################
    # #  Start synchronous inference and get inference result
    start_time = time.time()
    req_handle = exec_net.start_async(0,
                                      inputs={input_blob: image_for_inference})
    # # ########################################################################
    # res = exec_net.infer({input_blob:image_for_inference})
    # # ######################## Get Inference Result  #########################
    status = req_handle.wait()
    res = req_handle.outputs[out_blob]

    # Do something with the results... (e.g., print the top prediction)
    print("FPS:", 1 / (time.time() - start_time))

    print((1 - res[0]).argsort()[:1])
    # ###############################  Clean  Up  ############################
    del exec_net
    del plugin
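A hedged variation on the sample above (not from the original source): the legacy IEPlugin.load is assumed to accept a num_requests argument, which lets two asynchronous requests overlap; net, plugin, input_blob, out_blob and the two input batches batch_a/batch_b are hypothetical and taken as given.

exec_net = plugin.load(network=net, num_requests=2)  # assumed legacy signature
req_a = exec_net.start_async(0, inputs={input_blob: batch_a})
req_b = exec_net.start_async(1, inputs={input_blob: batch_b})
for req in (req_a, req_b):
    if req.wait() == 0:  # 0 is the OK status code in the legacy API
        print(req.outputs[out_blob])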
Example #7
    def __init__(self, config_entry, adapter):
        super().__init__(config_entry, adapter)

        dlsdk_launcher_config = DLSDKLauncherConfig('DLSDK_Launcher')
        dlsdk_launcher_config.validate(self._config)

        self._device = self._config['device'].upper()
        self._prepare_bitstream_firmware(self._config)
        self.plugin = ie.IEPlugin(self._device)
        print_info('Loaded {} plugin version: {}'.format(
            self.plugin.device, self.plugin.version))

        if dlsdk_launcher_config.need_conversion:
            self._convert_model(self._config, dlsdk_launcher_config.framework)
        else:
            self._model = self._config['model']
            self._weights = self._config['weights']

        if self._config.get('cpu_extensions') and 'CPU' in self._device:
            self.plugin.add_cpu_extension(
                str(self._config.get('cpu_extensions')))
        if self._config.get('gpu_extensions') and 'GPU' in self._device:
            self.plugin.set_config('CONFIG_FILE',
                                   str(self._config.get('gpu_extensions')))
        self.network = ie.IENetwork.from_ir(model=str(self._model),
                                            weights=str(self._weights))
        self.original_outputs = self.network.outputs
        outputs = self._config.get('outputs')
        if outputs:
            self.network.add_outputs(outputs)
        self.exec_network = self.plugin.load(network=self.network)
        self._config_inputs = parse_inputs(self._config.get('inputs', []))
        check_user_inputs(self.network.inputs.keys(), self._config_inputs)
        reshape_user_inputs(self._config_inputs,
                            self.exec_network.requests[0].inputs)
        image_inputs = list(
            filter(lambda input: input not in self._config_inputs,
                   self.inputs.keys()))
        if not image_inputs:
            raise ValueError('image input is not found')
        if len(image_inputs) > 1:
            raise ValueError(
                'topologies with several image inputs are not supported')
        self._image_input_blob = image_inputs[0]
        self._batch = self.network.inputs[self._image_input_blob].shape[0]
Example #8
    def _create_ie_plugin(self, log=True):
        if hasattr(self, 'plugin'):
            del self.plugin
        self.plugin = ie.IEPlugin(self._device)
        if log:
            print_info('IE version: {}'.format(ie.get_version()))
            print_info('Loaded {} plugin version: {}'.format(
                self.plugin.device, self.plugin.version))

        cpu_extensions = self._config.get('cpu_extensions')
        if cpu_extensions and 'CPU' in self._device:
            selection_mode = self._config.get('_cpu_extensions_mode')
            cpu_extensions = DLSDKLauncher.get_cpu_extension(
                cpu_extensions, selection_mode)
            self.plugin.add_cpu_extension(str(cpu_extensions))
        if self._config.get('gpu_extensions') and 'GPU' in self._device:
            self.plugin.set_config('CONFIG_FILE',
                                   str(self._config.get('gpu_extensions')))
Example #9
def main():
    frame_interval = 3  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0
    start_time = time.time()

    parser = get_parser()
    args = parser.parse_args()

    use_classifier = bool(args.classifier)

    extensions = os.environ.get('INTEL_EXTENSIONS_PATH')
    plugin = ie.IEPlugin(device=args.device)

    if extensions and "CPU" in args.device:
        for ext in extensions.split(':'):
            print("LOAD extension from {}".format(ext))
            plugin.add_cpu_extension(ext)

    print('Load PNET')

    pnets = []
    for r in parse_resolutions(args.resolutions):
        p = PNetHandler(plugin, r[0], r[1])
        pnets.append(p)

    print('Load RNET')
    net = ie.IENetwork.from_ir(*net_filenames(plugin, 'rnet'))
    rnet_input_name = list(net.inputs.keys())[0]
    rnet_output_name0 = net.outputs[0]
    rnet_output_name1 = net.outputs[1]
    r_net = plugin.load(net)

    print('Load ONET')

    net = ie.IENetwork.from_ir(*net_filenames(plugin, 'onet'))
    onet_input_name = list(net.inputs.keys())[0]
    onet_batch_size = net.inputs[onet_input_name][0]
    onet_output_name0 = net.outputs[0]
    onet_output_name1 = net.outputs[1]
    onet_output_name2 = net.outputs[2]
    print('ONET_BATCH_SIZE = {}'.format(onet_batch_size))
    o_net = plugin.load(net)

    if use_classifier:
        print('Load FACENET')

        model_file = args.graph
        weights_file = model_file[:-3] + 'bin'

        net = ie.IENetwork.from_ir(model_file, weights_file)
        facenet_input = list(net.inputs.keys())[0]
        facenet_output = net.outputs[0]
        face_net = plugin.load(net)

        # Load classifier
        with open(args.classifier, 'rb') as f:
            opts = {'file': f}
            if six.PY3:
                opts['encoding'] = 'latin1'
            (model, class_names) = pickle.load(**opts)

    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three stages
    factor = 0.709  # scale factor

    # video_capture = cv2.VideoCapture(0)
    if args.image is None:
        from imutils.video import VideoStream
        from imutils.video import FPS
        vs = VideoStream(usePiCamera=args.camera_device == "PI",
                         # resolution=(640, 480),
                         # framerate=24
                         ).start()
        time.sleep(1)
        fps = FPS().start()

    bounding_boxes = []
    labels = []

    pnets_proxy = []
    for p in pnets:
        pnets_proxy.append(p.proxy())

    def _rnet_proxy(img):
        output = r_net.infer({rnet_input_name: img})
        return output[rnet_output_name0], output[rnet_output_name1]

    def _onet_proxy(img):
        # img = img.reshape([1, 3, 48, 48])
        output = o_net.infer({onet_input_name: img})
        return output[onet_output_name0], output[onet_output_name1], output[
            onet_output_name2]

    pnet, rnet, onet = detect_face.create_openvino_mtcnn(
        pnets_proxy, _rnet_proxy, _onet_proxy, onet_batch_size)
    try:
        while True:
            # Capture frame-by-frame
            if args.image is None:
                frame = vs.read()
                if isinstance(frame, tuple):
                    frame = frame[1]
            else:
                frame = cv2.imread(args.image).astype(np.float32)

            # h = 400
            # w = int(round(frame.shape[1] / (frame.shape[0] / float(h))))
            h = 480
            w = 640
            if (frame.shape[1] != w) or (frame.shape[0] != h):
                frame = cv2.resize(frame, (w, h), interpolation=cv2.INTER_AREA)

            # BGR -> RGB
            rgb_frame = frame[:, :, ::-1]
            # rgb_frame = frame
            # print("Frame {}".format(frame.shape))

            if (frame_count % frame_interval) == 0:
                # t = time.time()
                bounding_boxes, _ = detect_face.detect_face_openvino(
                    rgb_frame, pnet, rnet, onet, threshold)
                # d = (time.time() - t) * 1000
                # LOG.info('recognition: %.3fms' % d)
                # Check our current fps
                end_time = time.time()
                if (end_time - start_time) > fps_display_interval:
                    frame_rate = int(frame_count / (end_time - start_time))
                    start_time = time.time()
                    frame_count = 0

                if use_classifier:
                    imgs = get_images(rgb_frame, bounding_boxes)
                    labels = []
                    # t = time.time()
                    for img_idx, img in enumerate(imgs):
                        img = img.astype(np.float32)

                        # Infer
                        img = img.transpose([2, 0,
                                             1]).reshape([1, 3, 160, 160])
                        output = face_net.infer({facenet_input: img})
                        output = output[facenet_output]
                        try:
                            output = output.reshape(1, model.shape_fit_[1])
                            predictions = model.predict_proba(output)
                        except ValueError as e:
                            # Cannot reshape
                            print("ERROR: Output from graph isn't consistent"
                                  " with classifier model: %s" % e)
                            continue

                        best_class_indices = np.argmax(predictions, axis=1)
                        best_class_probabilities = predictions[
                            np.arange(len(best_class_indices)),
                            best_class_indices]

                        for i in range(len(best_class_indices)):
                            bb = bounding_boxes[img_idx].astype(int)
                            text = '%.1f%% %s' % (
                                best_class_probabilities[i] * 100,
                                class_names[best_class_indices[i]])
                            labels.append({
                                'label': text,
                                'left': bb[0],
                                'top': bb[1] - 5
                            })
                            # DEBUG
                            print('%4d  %s: %.3f' %
                                  (i, class_names[best_class_indices[i]],
                                   best_class_probabilities[i]))
                    # d = (time.time() - t) * 1000
                    # LOG.info('facenet: %.3fms' % d)

            add_overlays(frame, bounding_boxes, frame_rate, labels=labels)

            frame_count += 1
            if args.image is None:
                cv2.imshow('Video', frame)
            else:
                print(bounding_boxes)
                break

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    except (KeyboardInterrupt, SystemExit) as e:
        print('Caught %s: %s' % (e.__class__.__name__, e))

    # When everything is done, release the capture
    # video_capture.release()
    if args.image is None:
        fps.stop()
        vs.stop()
        cv2.destroyAllWindows()
    print('Finished')
Example #10
def main(args):
    use_mlboard = False
    mlboard = None
    if client:
        mlboard = client.Client()
        try:
            mlboard.apps.get()
        except Exception:
            mlboard = None
            print('Not using mlboard.')
        else:
            print('Using mlboard for parameter logging.')
            use_mlboard = True

    if args.use_split_dataset:
        dataset_tmp = facenet.get_dataset(args.data_dir)
        train_set, test_set = split_dataset(dataset_tmp,
                                            args.min_nrof_images_per_class,
                                            args.nrof_train_images_per_class)
        if args.mode == 'TRAIN':
            dataset = train_set
        elif args.mode == 'CLASSIFY':
            dataset = test_set
    else:
        dataset = facenet.get_dataset(args.data_dir)

    update_data({'mode': args.mode}, use_mlboard, mlboard)

    # Check that there is at least one training image per class
    for cls in dataset:
        assert len(
            cls.image_paths
        ) > 0, 'There must be at least one image for each class in the dataset'

    paths, labels = facenet.get_image_paths_and_labels(dataset)

    print('Number of classes: %d' % len(dataset))
    print('Number of images: %d' % len(paths))
    data = {
        'num_classes': len(dataset),
        'num_images': len(paths),
        'model_path': args.model,
        'image_size': args.image_size,
        'data_dir': args.data_dir,
        'batch_size': args.batch_size,
    }
    update_data(data, use_mlboard, mlboard)

    # Load the model
    print('Loading feature extraction model')
    xml_file = args.model
    bin_file = xml_file[:-3] + 'bin'

    net = ie.IENetwork.from_ir(xml_file, bin_file)
    extensions = os.environ.get('INTEL_EXTENSIONS_PATH')
    plugin = ie.IEPlugin(device=args.device)

    if extensions and "CPU" in args.device:
        for ext in extensions.split(':'):
            print("LOAD extension from {}".format(ext))
            plugin.add_cpu_extension(ext)

    input_name = list(net.inputs.keys())[0]
    output_name = net.outputs[0]
    exec_net = plugin.load(net)

    # Run forward pass to calculate embeddings
    print('Calculating features for images')
    nrof_images = len(paths)
    nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images /
                                           args.batch_size))
    emb_array = np.zeros((nrof_images, 512))
    for i in range(nrof_batches_per_epoch):
        start_index = i * args.batch_size
        end_index = min((i + 1) * args.batch_size, nrof_images)
        paths_batch = paths[start_index:end_index]
        for j in range(end_index - start_index):
            print('Batch {} <-> {}'.format(paths_batch[j],
                                           labels[start_index + j]))
        images = facenet.load_data(paths_batch, False, False, args.image_size)
        images = images.transpose([0, 3, 1, 2])
        feed_dict = {input_name: images}
        output = exec_net.infer(feed_dict)
        output = output[output_name]
        emb_array[start_index:end_index, :] = output

    classifier_filename_exp = os.path.expanduser(args.classifier_filename)

    if args.mode == 'TRAIN':
        # Train classifier
        print('Training classifier')
        model = svm.SVC(kernel='linear', probability=True)
        model.fit(emb_array, labels)

        # Create a list of class names
        class_names = [cls.name.replace('_', ' ') for cls in dataset]
        print('Classes:')
        print(class_names)

        # Saving classifier model
        with open(classifier_filename_exp, 'wb') as outfile:
            pickle.dump((model, class_names), outfile, protocol=2)
        print('Saved classifier model to file "%s"' % classifier_filename_exp)

    elif args.mode == 'CLASSIFY':
        # Classify images
        print('Testing classifier')
        with open(classifier_filename_exp, 'rb') as infile:
            (model, class_names) = pickle.load(infile)

        print('Loaded classifier model from file "%s"' %
              classifier_filename_exp)

        predictions = model.predict_proba(emb_array)
        best_class_indices = np.argmax(predictions, axis=1)
        best_class_probabilities = predictions[
            np.arange(len(best_class_indices)), best_class_indices]

        for i in range(len(best_class_indices)):
            print('%4d  %s: %.3f' % (i, class_names[best_class_indices[i]],
                                     best_class_probabilities[i]))

        accuracy = np.mean(np.equal(best_class_indices, labels))
        update_data({'accuracy': accuracy}, use_mlboard, mlboard)
        print('Accuracy: %.3f' % accuracy)

        if args.upload_model and accuracy >= args.upload_threshold:
            timestamp = datetime.datetime.now().strftime('%s')

            upload_model(use_mlboard, mlboard, classifier_filename_exp,
                         'facenet-classifier', '1.0.0-%s' % timestamp)
Example #11
    def __init__(
            self,
            device,
            face_detection_path,
            facenet_path=None,
            classifier=None,
            bg_remove_path=None,
            loaded_plugin=None,
            debug=False):

        self.use_classifiers = False
        self.debug = debug

        extensions = os.environ.get('INTEL_EXTENSIONS_PATH')
        if loaded_plugin is not None:
            plugin = loaded_plugin
        else:
            plugin = ie.IEPlugin(device=device)

        if extensions and "CPU" in device:
            for ext in extensions.split(':'):
                print("LOAD extension from {}".format(ext))
                plugin.add_cpu_extension(ext)

        print('Load FACE DETECTION')
        face_path = face_detection_path
        weights_file = face_path[:face_path.rfind('.')] + '.bin'
        net = ie.IENetwork(face_path, weights_file)
        self.face_detect = nets.FaceDetect(plugin, net)

        if facenet_path:
            print('Load FACENET')

            model_file = facenet_path
            weights_file = model_file[:model_file.rfind('.')] + '.bin'

            net = ie.IENetwork(model_file, weights_file)
            self.facenet_input = list(net.inputs.keys())[0]
            outputs = list(net.outputs)
            self.facenet_output = outputs[0]
            self.face_net = plugin.load(net)

        if classifier and len(classifier) > 0:
            self.use_classifiers = bool(facenet_path)
            self.classifiers = []
            self.classifier_names = []
            self.embedding_sizes = []
            self.class_names = None
            self.class_stats = None

            for clfi, clf in enumerate(classifier):
                # Load classifier
                with open(clf, 'rb') as f:
                    print('Load CLASSIFIER %s' % clf)
                    opts = {'file': f}
                    if six.PY3:
                        opts['encoding'] = 'latin1'
                    (classifier, class_names, class_stats) = pickle.load(**opts)
                    if isinstance(classifier, svm.SVC):
                        embedding_size = classifier.shape_fit_[1]
                        clfn = "SVM classifier"
                        self.classifier_names.append("SVM")
                    elif isinstance(classifier, neighbors.KNeighborsClassifier):
                        embedding_size = classifier._fit_X.shape[1]
                        clfn = "kNN (neighbors %d) classifier" % classifier.n_neighbors
                        # self.classifier_names.append("kNN(%2d)" % classifier.n_neighbors)
                        self.classifier_names.append("kNN")
                    else:
                        # try embedding_size = 512
                        embedding_size = 512
                        clfn = type(classifier)
                        self.classifier_names.append("%d" % clfi)
                    print('Loaded %s, embedding size: %d' % (clfn, embedding_size))
                    if self.class_names is None:
                        self.class_names = class_names
                    elif class_names != self.class_names:
                        raise RuntimeError("Different class names in classifiers")
                    if self.class_stats is None:
                        self.class_stats = class_stats
                    elif class_stats != self.class_stats:
                        raise RuntimeError("Different class stats in classifiers")
                    self.embedding_sizes.append(embedding_size)
                    self.classifiers.append(classifier)

        self.bg_remove = bg_remove.get_driver(bg_remove_path)