import argparse

import cv2
import numpy as np

try:
    from tflite_runtime.interpreter import Interpreter
except ImportError:
    from tensorflow.lite.python.interpreter import Interpreter

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--deep_model", default="3_class_model_mobilenet_v3_small_v2.1_1080x1920.tflite", help="Path of the deeplabv3plus model.")
    parser.add_argument('--image_width', type=int, default=1920, help='Model Image dimension (width). (Default=None)')
    parser.add_argument('--image_height', type=int, default=1080, help='Model Image dimension (height). (Default=None)')
    parser.add_argument("--num_threads", type=int, default=4, help="Threads.")
    args = parser.parse_args()

    deep_model   = args.deep_model
    image_width  = args.image_width
    image_height = args.image_height
    num_threads  = args.num_threads

    interpreter = Interpreter(model_path=deep_model)
    try:
        interpreter.set_num_threads(num_threads)
    except AttributeError:
        # Stock TensorFlow / TensorFlow Lite runtimes may not expose set_num_threads().
        print("WARNING: The installed Python API of the TensorFlow/TensorFlow Lite runtime does not support multi-threaded processing.")
        print("WARNING: Running in single-threaded mode.")
        print("WARNING: To use multi-threading for better performance on aarch64/armv7l platforms, build a customized TensorFlow/TensorFlow Lite runtime from one of the repositories below.")
        print("https://github.com/PINTO0309/Tensorflow-bin.git")
        print("https://github.com/PINTO0309/TensorflowLite-bin.git")
    
    interpreter.allocate_tensors()
    input_index  = interpreter.get_input_details()[0]['index']
    output_index = interpreter.get_output_details()[0]['index']

    color_image = cv2.imread("/home/pi/03_integer_quantization/soren_images/2020_10_02_10_58_15/color.png")  # path to color image
    color_image = cv2.resize(color_image, (image_width, image_height))
    cv2.imshow("Input color image", color_image)
Example 2
import logging

import cv2
import numpy as np

try:
    from tflite_runtime.interpreter import Interpreter
except ImportError:
    from tensorflow.lite.python.interpreter import Interpreter

logger = logging.getLogger(__name__)


class CNNClassifier(object):
    def __init__(self, model_file, label_file):
        logger.info(model_file)
        self._interpreter = Interpreter(model_path=model_file)
        try:
            # Multi-threading requires a runtime build that exposes set_num_threads().
            self._interpreter.set_num_threads(4)
        except AttributeError:
            pass  # fall back to single-threaded execution
        self._interpreter.allocate_tensors()
        self._labels = self.load_labels(label_file)
        self._input_details = self._interpreter.get_input_details()
        self._output_details = self._interpreter.get_output_details()
        self._input_height = self._input_details[0]['shape'][1]
        self._input_width = self._input_details[0]['shape'][2]
        self._floating_model = (self._input_details[0]['dtype'] == np.float32)

    def close(self):
        # Nothing to release explicitly; the interpreter is freed on garbage collection.
        pass

    def read_tensor_from_image_file(self, file_name):
        image = cv2.imread(file_name)
        return self.read_tensor_from_image_mat(image)

    def read_tensor_from_image_mat(self, image_mat):
        frame_rgb = cv2.cvtColor(image_mat, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb,
                                   (self._input_width, self._input_height))
        input_data = np.expand_dims(frame_resized, axis=0)

        # Normalize pixel values to [-1, 1] if using a floating (non-quantized) model
        if self._floating_model:
            input_mean = 127.5
            input_std = 127.5
            input_data = (np.float32(input_data) - input_mean) / input_std

        return input_data

    def load_labels(self, label_file):
        with open(label_file, 'r') as f:
            return [line.strip() for line in f]

    def classify_image(self, image_file_or_mat, top_results=3):
        if isinstance(image_file_or_mat, str):
            input_image = self.read_tensor_from_image_file(
                file_name=image_file_or_mat)
        else:
            input_image = self.read_tensor_from_image_mat(image_file_or_mat)

        self._interpreter.set_tensor(self._input_details[0]['index'],
                                     input_image)
        self._interpreter.invoke()
        scores = self._interpreter.get_tensor(
            self._output_details[0]['index'])[0]

        #print("scores: " + str(scores))
        confidence = 0.4
        base = 1
        # normalize to int8 for quantized models
        if len(scores) > 0 and (scores[0] == int(scores[0])):
            confidence = 128
            base = 256
        pairs = []
        for i, score in enumerate(scores):
            if score > confidence:
                object_name = self._labels[i]
                pairs.append((object_name, int(100 * score / base)))

        pairs = sorted(pairs, key=lambda x: x[1], reverse=True)[:top_results]
        return pairs

    def detect_objects(self, image_file_or_mat, top_results=3):
        if isinstance(image_file_or_mat, str):
            input_image = self.read_tensor_from_image_file(
                file_name=image_file_or_mat)
        else:
            input_image = self.read_tensor_from_image_mat(image_file_or_mat)

        self._interpreter.set_tensor(self._input_details[0]['index'],
                                     input_image)
        self._interpreter.invoke()

        # Retrieve detection results (standard TFLite SSD output order)
        boxes = self._interpreter.get_tensor(
            self._output_details[0]['index'])[0]  # bounding box coordinates
        classes = self._interpreter.get_tensor(
            self._output_details[1]['index'])[0]  # class indices
        scores = self._interpreter.get_tensor(
            self._output_details[2]['index'])[0]  # confidence scores

        # Collect detections above the minimum confidence threshold. Box
        # coordinates are normalized to [0, 1]; scaling by 100 expresses them
        # as percentages of the image dimensions.
        min_conf_threshold = 0.1
        imH = 100
        imW = 100
        pairs = []
        for i in range(len(scores)):
            if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):

                # The interpreter can return coordinates outside the image
                # bounds, so clamp them with max() and min().
                ymin = int(max(1, (boxes[i][0] * imH)))
                xmin = int(max(1, (boxes[i][1] * imW)))
                ymax = int(min(imH, (boxes[i][2] * imH)))
                xmax = int(min(imW, (boxes[i][3] * imW)))

                # The "+ 1" assumes the label file has a leading background
                # entry; drop it if your labelmap starts at the first class.
                object_name = self._labels[int(classes[i]) + 1]
                pairs.append((object_name, int(100 * scores[i]), (xmin, ymin,
                                                                  xmax, ymax)))

        pairs = sorted(pairs, key=lambda x: x[1], reverse=True)[:top_results]
        logger.info(str(pairs))
        return pairs
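A minimal usage sketch for the class above; the model, label, and image file names ("model.tflite", "labels.txt", "test.jpg") are placeholders, not files from the original:

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    classifier = CNNClassifier("model.tflite", "labels.txt")
    # classify_image() returns up to `top_results` (label, percent) pairs,
    # sorted by descending confidence.
    for label, percent in classifier.classify_image("test.jpg", top_results=3):
        print(f"{label}: {percent}%")
    classifier.close()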