Example #1
    def __init__(self, model_path: str, **kwargs: Any) -> None:
        self._interpreter: tflite.Interpreter = tflite.Interpreter(
            model_path=model_path, **kwargs
        )
        self._input_details = self._interpreter.get_input_details()
        self._output_details = self._interpreter.get_output_details()
        self._interpreter.allocate_tensors()
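
A minimal inference helper for a wrapper like this might look as follows (a sketch; the method name and the single-input/single-output assumption are mine):

    def run(self, input_data: np.ndarray) -> np.ndarray:
        # Feed the first input, run the graph, return the first output.
        self._interpreter.set_tensor(self._input_details[0]['index'],
                                     input_data)
        self._interpreter.invoke()
        return self._interpreter.get_tensor(self._output_details[0]['index'])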
Example #2
def make_interpreter(model_file):
    model_file, *device = model_file.split('@')
    return tflite.Interpreter(
        model_path=model_file,
        experimental_delegates=[
            tflite.load_delegate('libedgetpu.so.1',
                                 {'device': device[0]} if device else {})
        ])
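
The optional '@' suffix selects a specific Edge TPU. Hypothetical calls:

interpreter = make_interpreter('model_edgetpu.tflite')        # any available TPU
interpreter = make_interpreter('model_edgetpu.tflite@usb:0')  # a specific USB TPU
interpreter.allocate_tensors()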
Example #3
def make_interpreter(model_file):
    model_file, *device = model_file.split('@')
    return tflite.Interpreter(
        model_path=model_file,
        experimental_delegates=[
            tflite.load_delegate(EDGETPU_SHARED_LIB,
                                 {'device': device[0]} if device else {})
        ])
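
Example #3 leaves EDGETPU_SHARED_LIB undefined; the Coral examples define it per platform, roughly like this:

import platform

EDGETPU_SHARED_LIB = {
    'Linux': 'libedgetpu.so.1',
    'Darwin': 'libedgetpu.1.dylib',
    'Windows': 'edgetpu.dll',
}[platform.system()]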
Example #4
    def load_tflite(self, tflite_path: str) -> None:
        if self.tpu:
            self.interpreter = tflite.Interpreter(
                model_path=tflite_path,
                experimental_delegates=[load_delegate("libedgetpu.so.1")],
            )
        else:
            self.interpreter = tflite.Interpreter(model_path=tflite_path)

        self.interpreter.allocate_tensors()
        input_details = self.interpreter.get_input_details()[0]
        # width, height
        self.input_size = (input_details["shape"][2],
                           input_details["shape"][1])
        self.input_index = input_details["index"]
        output_details = self.interpreter.get_output_details()
        self.output_index = [details["index"] for details in output_details]
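
A sketch of how the cached size and indices might be used per frame (the method name and the cv2/uint8 preprocessing are assumptions):

    def infer(self, frame_bgr):
        # cv2.resize takes (width, height), matching self.input_size
        frame = cv2.resize(frame_bgr, self.input_size)
        self.interpreter.set_tensor(self.input_index,
                                    np.expand_dims(frame, axis=0))
        self.interpreter.invoke()
        return [self.interpreter.get_tensor(i) for i in self.output_index]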
Example #5
    def __setstate__(self, state):
        (self._model_file, self._label_file, self._input_details,
         self._output_details, self._labels) = state

        self._interpreter = tflite.Interpreter(
            self._model_file,
            experimental_delegates=[tflite.load_delegate(self._lib_edgetpu)])
        self._interpreter.allocate_tensors()
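
For this to round-trip, a matching __getstate__ has to pickle the same tuple, since the Interpreter itself is not picklable (a sketch; note that __setstate__ also reads self._lib_edgetpu, which must survive separately, e.g. as a class attribute):

    def __getstate__(self):
        # Drop the live Interpreter; __setstate__ rebuilds it from the path.
        return (self._model_file, self._label_file,
                self._input_details, self._output_details, self._labels)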
Example #6
def load_tflite_model():
    config.logger.info('loading tflite model from %s',
                       config.VP_TFLITE_MODEL_PATH)
    interpreter = tflite.Interpreter(model_path=config.VP_TFLITE_MODEL_PATH)
    interpreter.allocate_tensors()
    config.logger.info('loaded model')

    return interpreter
Example #7
    def __init__(self, path: str):
        self.interpreter = tflite.Interpreter(path)
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        assert len(self.input_details) == 1, \
            "TFLiteModel only supports models with a single input"
        self.interpreter.allocate_tensors()
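
Given the single-input guarantee asserted above, a hypothetical predict helper could be:

    def predict(self, x: np.ndarray) -> list:
        # One input in, every declared output back out.
        self.interpreter.set_tensor(self.input_details[0]['index'], x)
        self.interpreter.invoke()
        return [self.interpreter.get_tensor(d['index'])
                for d in self.output_details]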
Example #8
def run_inference_tflite(model_file_path, image, top_percent, input_size,
                         label_file, verbose):
    if os.path.exists(model_file_path) and os.path.exists(image):
        interpreter = tflite.Interpreter(model_path=model_file_path)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()

        # check the type of the input tensor
        floating_model = input_details[0]['dtype'] == np.float32

        # NxHxWxC, H:1, W:2
        height = input_details[0]['shape'][1]
        width = input_details[0]['shape'][2]

        img = process_image_file(image, top_percent, input_size)
        img = img.astype('float32') / 255.0

        # add N dim
        input_data = np.expand_dims(img, axis=0)

        # The normalization below came from an example script, but it skewed
        # the results relative to the non-TFLite model; with it commented out,
        # both models classify the same photos identically.
        # if floating_model:
        #     input_data = (np.float32(input_data) - args.input_mean) / args.input_std

        interpreter.set_tensor(input_details[0]['index'], input_data)

        start_time = time.time()
        interpreter.invoke()
        stop_time = time.time()

        output_data = interpreter.get_tensor(output_details[0]['index'])
        results = np.squeeze(output_data)

        top_k = results.argsort()[-5:][::-1]
        labels = load_labels(label_file)

        print("Prediction: " + labels[top_k[0]])

        # Print Further details
        if verbose:
            for i in top_k:
                if floating_model:
                    print('{:.3f}: {}'.format(float(results[i]), labels[i]))
                else:
                    print('{:.3f}: {}'.format(float(results[i] / 255.0),
                                              labels[i]))

            print('time: {:.3f}ms'.format((stop_time - start_time) * 1000))

        return labels[top_k[0]]

    else:
        print("One of the input files does not exist or couldn't be found.")
        return None
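
A hypothetical call, with placeholder paths and values:

label = run_inference_tflite('model.tflite', 'photo.jpg', top_percent=0.08,
                             input_size=480, label_file='labels.txt',
                             verbose=True)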
Example #9
def initialize_detector(args):

    TPU_PATH = 'models/tpu/mobilenet_ssd_v2_coco_quant/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite' #'models/pednet/model/ped_tpu.tflite' 
    CPU_PATH = 'models/pednet/model/pednet.tflite'

    # initialize coral tpu model
    if args.tpu:
        print('   > TPU = TRUE')

        if args.model_path:
            model_path = args.model_path
            print('   > CUSTOM DETECTOR = TRUE')
            print(f'      > DETECTOR PATH = {model_path}')
        else:
            model_path = os.path.join(os.path.dirname(__file__), TPU_PATH)
            print('   > CUSTOM DETECTOR = FALSE')

        # strip an optional '@device' suffix so the bare path goes to the
        # Interpreter and the device spec goes to the delegate
        model_path, *device = model_path.split('@')
        edgetpu_shared_lib = 'libedgetpu.so.1'
        interpreter = tflite.Interpreter(
            model_path,
            experimental_delegates=[
                tflite.load_delegate(edgetpu_shared_lib,
                                     {'device': device[0]} if device else {})
            ])
        interpreter.allocate_tensors()

    # initialize tflite model
    else:
        print('   > TPU = FALSE')

        if args.model_path:
            model_path = args.model_path
            print('   > CUSTOM DETECTOR = TRUE')
            print(f'      > DETECTOR PATH = {model_path}')
        else:
            print('   > CUSTOM DETECTOR = FALSE')
            model_path = os.path.join(os.path.dirname(__file__), CPU_PATH)

        interpreter = tflite.Interpreter(model_path=model_path)
        interpreter.allocate_tensors()

    return interpreter
Ejemplo n.º 10
0
    def __init__(self,
                 tf_device=None,
                 model_path=None,
                 num_threads=3,
                 labels=None):
        self.fps = EventsPerSecond()
        if labels is None:
            self.labels = {}
        else:
            self.labels = load_labels(labels)

        device_config = {"device": "usb"}
        if tf_device is not None:
            device_config = {"device": tf_device}

        edge_tpu_delegate = None

        if tf_device != "cpu":
            try:
                logger.info(
                    f"Attempting to load TPU as {device_config['device']}")
                edge_tpu_delegate = load_delegate("libedgetpu.so.1.0",
                                                  device_config)
                logger.info("TPU found")
                self.interpreter = tflite.Interpreter(
                    model_path=model_path or "/edgetpu_model.tflite",
                    experimental_delegates=[edge_tpu_delegate],
                )
            except ValueError:
                logger.error(
                    "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
                )
                raise
        else:
            logger.warning(
                "CPU detectors are not recommended and should only be used for testing or for trial purposes."
            )
            self.interpreter = tflite.Interpreter(
                model_path=model_path or "/cpu_model.tflite",
                num_threads=num_threads)

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()
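
A sketch of a detection call built on the details captured above (the method name and single-input/single-output assumption are mine):

    def detect_raw(self, tensor_input):
        self.interpreter.set_tensor(
            self.tensor_input_details[0]['index'], tensor_input)
        self.interpreter.invoke()
        return self.interpreter.get_tensor(
            self.tensor_output_details[0]['index'])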
Example #11
    def __init__(self):
        self.lidar = Lidar(150)

        self.interpreter = tflite.Interpreter(
            model_path="leg_clf_lidar.tflite")
        self.interpreter.allocate_tensors()

        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
Example #12
    def __init__(self):
        self.interpreter = tflite.Interpreter('yamnet.tflite')

        self.input_details = self.interpreter.get_input_details()
        self.waveform_input_index = self.input_details[0]['index']
        self.output_details = self.interpreter.get_output_details()
        self.scores_output_index = self.output_details[0]['index']
        self.embeddings_output_index = self.output_details[1]['index']
        self.spectrogram_output_index = self.output_details[2]['index']
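
YAMNet's TFLite model takes a variable-length waveform, which is why the snippet above defers allocate_tensors(); before each run the input is resized to the clip length (a sketch; the method name is an assumption):

    def classify(self, waveform):
        # waveform: 1-D float32 array in [-1.0, 1.0], 16 kHz mono
        self.interpreter.resize_tensor_input(self.waveform_input_index,
                                             [len(waveform)], strict=True)
        self.interpreter.allocate_tensors()
        self.interpreter.set_tensor(self.waveform_input_index, waveform)
        self.interpreter.invoke()
        return self.interpreter.get_tensor(self.scores_output_index)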
Example #13
    def __init__(self):
        self.interpreter = tflite.Interpreter(model_path="model.tflite")
        self.interpreter.allocate_tensors()

        # Get input and output tensors.
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

        self.input_shape = self.input_details[0]['shape']
Example #14
    def load_model(self, model):
        self.interpreter = tflite.Interpreter(
            model_path=osp.join(self.model_path, f'{model}.tflite'))
        self.model_type = model.split('_')[0]
        # read the comma-separated thresholds, closing the file promptly
        with open(osp.join(self.thr_path, f'{model}.thr')) as f:
            self.thrs = [float(item)
                         for item in f.readline().strip().split(',')]
Example #15
def init_interpreter(model_path):
    global interpreter
    global input_details
    global output_details
    interpreter = tflite.Interpreter(model_path=model_path)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.allocate_tensors()
    print('done init')
Example #16
    def __init__(self, filename):
        self.interpreter = tflite.Interpreter(model_path=filename)
        self.interpreter.allocate_tensors()

        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

        # Prev Values
        self.prev = np.zeros((5, 6))
Example #17
def main(argv):
    assert argv

    # Load the TFLite model and allocate tensors.
    interpreter = tflite.Interpreter(model_path="yamnet.tflite")
    interpreter.allocate_tensors()
    inputs = interpreter.get_input_details()
    outputs = interpreter.get_output_details()

    # Load dataset
    yamnet_csv = load_csv('yamnet_class_map.csv')
    yamnet_classes = []
    for i in yamnet_csv[1:]:  # ignore header
        yamnet_classes.append(i[2])

    for file_name in argv:
        # Decode the WAV file.
        wav_data, sr = sf.read(file_name, dtype=np.int16)
        assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
        waveform = wav_data / 32768.0  # Convert to [-1.0, +1.0]

        # Convert to mono and the sample rate expected by YAMNet.
        if len(waveform.shape) > 1:
            waveform = np.mean(waveform, axis=1)
        if sr != params.SAMPLE_RATE:
            waveform = resampy.resample(waveform, sr, params.SAMPLE_RATE)

        # Predict YAMNet classes.
        interpreter.set_tensor(
            inputs[0]['index'],
            np.expand_dims(np.array(waveform, dtype=np.float32), axis=0))
        interpreter.invoke()
        scores = interpreter.get_tensor(outputs[0]['index'])
        # Scores is a matrix of (time_frames, num_classes) classifier scores.
        # Average them along time to get an overall classifier output for the clip.
        prediction = np.mean(scores, axis=0)

        # Report the highest-scoring classes and their scores.
        top5_i = np.argsort(prediction)[::-1][:5]

        print(
            file_name, ':\n' + '\n'.join(
                '  {:12s}: {:.3f}'.format(yamnet_classes[i], prediction[i])
                for i in top5_i))

        # Encode classifications (user feedback)
        encoded_classes = [('Bark', 'A'), ('Beep, bleep', 'B'),
                           ('Buzzer', 'C'), ('Speech', 'D'),
                           ('Baby cry, infant cry', 'E')]
        encoded_classes_dict = dict(encoded_classes)

        for i in top5_i:
            if yamnet_classes[i] in encoded_classes_dict:
                os.system("python3 serial_send.py " +
                          encoded_classes_dict[yamnet_classes[i]])
                print('Sending pattern',
                      encoded_classes_dict[yamnet_classes[i]], 'to ESP32')
Example #18
    def check_for_new_model(self):
        try:
            self.Q = tflite.Interpreter(model_path='Q_new.tflite')
            self.Q.allocate_tensors()
            os.remove('Q.tflite')
            os.rename('Q_new.tflite', 'Q.tflite')
            print('new Q model loaded')
        except (ValueError, OSError):
            # no new model yet (or it is still being written); keep the old one
            pass
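
The hot-reload above only works if Q_new.tflite appears atomically; a sketch of the writer side (new_model_bytes is a placeholder):

import os

with open('Q_tmp.tflite', 'wb') as f:
    f.write(new_model_bytes)  # new_model_bytes: the freshly trained model
os.rename('Q_tmp.tflite', 'Q_new.tflite')  # rename is atomic on POSIX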
Example #19
    def __init__(
            self,
            model_path: str = None,
            model_type: str = "mobileNet",
    ):
        if model_path is None:
            model_path = get_file(BASE_URL + model_type + ".tflite",
                                  FILE_HASHES[model_type])
        self.face_recognizer = tflite.Interpreter(model_path=model_path)
Example #20
    def __init__(self, model_dir):
        self.model_dir = model_dir
        self.interpreter = tflite.Interpreter(
            model_path=self.model_dir,
            experimental_delegates=[tflite.load_delegate(EDGETPU_SHARED_LIB)])
        self.interpreter.allocate_tensors()  # required before set_tensor/invoke
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        self.image_shape = (self.input_details[0]['shape'][1],
                            self.input_details[0]['shape'][2])
Example #21
    def init_tf2(self, model_file, label_file_name):
        # load label file and convert to list
        possible_labels = np.asarray(self.load_labels(label_file_name))
        try:  # tflite_runtime is available on the Raspberry Pi
            interpreter = tflite.Interpreter(model_file, None)
        except NameError:  # fall back to full TensorFlow for desktop dev
            interpreter = tf.lite.Interpreter(model_file, None)
        interpreter.allocate_tensors()
        return interpreter, possible_labels
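
The NameError fallback only fires if the import was attempted in a way that can leave tflite undefined, e.g. (a sketch):

try:
    import tflite_runtime.interpreter as tflite  # lightweight runtime on the Pi
except ImportError:
    import tensorflow as tf  # desktop: tflite stays undefined, so init_tf2
                             # falls through to tf.lite.Interpreter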
Example #22
    def __init__(self, outputs_queue):
        # os.chdir(sys._MEIPASS)
        self.model = tflite.Interpreter(
            "tflite_model\\optimized_model_v4.0.0_7779.tflite")
        self.path = "output\\"
        self.outputs_queue = outputs_queue
        self.front_face_cascade = cv2.CascadeClassifier(
            "cascades\\haarcascade_frontalface_default.xml")
        self.emo = ""
Example #23
    def _load_tflite(self, tflite_path):
        experimental_delegates = []
        try:
            experimental_delegates.append(
                tflite.load_delegate(EDGETPU_SHARED_LIB,
                                     {'device': self._config.device}
                                     if self._config.device else {}))
        except AttributeError as e:
            if '\'Delegate\' object has no attribute \'_library\'' in str(e):
                print(
                    'Warning: EdgeTPU library not found. You can still run CPU models, '
                    'but if you have a Coral device make sure you set it up: '
                    'https://coral.ai/docs/setup/.')
        except ValueError as e:
            if 'Failed to load delegate from ' in str(e):
                print(
                    'Warning: EdgeTPU library not found. You can still run CPU models, '
                    'but if you have a Coral device make sure you set it up: '
                    'https://coral.ai/docs/setup/.')

        try:
            self._interpreter = tflite.Interpreter(
                model_path=tflite_path,
                experimental_delegates=experimental_delegates)
        except TypeError as e:
            if ('got an unexpected keyword argument '
                    '\'experimental_delegates\'') in str(e):
                self._interpreter = tflite.Interpreter(model_path=tflite_path)
        try:
            self._interpreter.allocate_tensors()
        except RuntimeError as e:
            if ('edgetpu-custom-op' in str(e)
                    or 'EdgeTpuDelegateForCustomOp' in str(e)):
                raise RuntimeError(
                    'Loaded an EdgeTPU model without the EdgeTPU '
                    'library loaded. If you have a Coral device make '
                    'sure you set it up: https://coral.ai/docs/setup/.') from e
            else:
                raise
        self._is_lstm = self._check_lstm()
        if self._is_lstm:
            print('Loading an LSTM model.')
            self._lstm_c = np.copy(self.input_tensor(1))
            self._lstm_h = np.copy(self.input_tensor(2))
Example #24
def object_frame(inputQueue, outputQueue):
    # interpreter = tf.lite.Interpreter(model_path=TFLITE_PATH+'/model.tflite')
    if not tpu:
        interpreter = tflite.Interpreter(model_path=TFLITE_PATH +
                                         '/model.tflite')
    elif not cust:
        interpreter = make_interpreter(
            TFLITE_PATH +
            '/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite')
    else:
        interpreter = make_interpreter(TFLITE_PATH + '/detect_edgetpu.tflite')
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # keep looping
    while True:
        data_out = []
        # check to see if there is a frame in our input queue
        if not inputQueue.empty():
            # grab the frame from the input queue
            img = inputQueue.get()

            if not tpu:
                input_data = np.expand_dims(img, axis=0)
                input_data = input_data / 127.5 - 1
                input_data = np.asarray(input_data, dtype=np.float32)
                interpreter.set_tensor(input_details[0]['index'], input_data)
                interpreter.invoke()
            else:
                common.set_input(interpreter, img)
                interpreter.invoke()
                scale = (1, 1)
                objects = detect.get_objects(interpreter, confThreshold, scale)

            if not tpu:
                boxes = interpreter.get_tensor(output_details[0]['index'])[0]
                classe = interpreter.get_tensor(output_details[1]['index'])[0]
                score = interpreter.get_tensor(output_details[2]['index'])[0]
                data_out = [boxes, classe, score]
            else:
                if objects:
                    # note: data_out keeps only the last detected object
                    for obj in objects:
                        box = obj.bbox
                        # print('bbox:',obj.bbox)
                        xmin = int(box[0])
                        ymin = int(box[1])
                        xmax = int(box[2])
                        ymax = int(box[3])
                        data_out = [[[ymin, xmin, ymax, xmax]], obj.id,
                                    obj.score]

            # print('data_out:',data_out )

            outputQueue.put(data_out)
Example #25
def gen(camera):
    """Video streaming generator function."""
    # Load TFLite model and allocate tensors.
    interpreter = tflite.Interpreter(
        FLAGS.model,
        experimental_delegates=[tflite.load_delegate('libedgetpu.so.1')])
    #interpreter = tflite.Interpreter(FLAGS.model)
    interpreter.allocate_tensors()

    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    while True:
        img = camera.get_frame()
        #img = cv2.flip(img, +1)

        imgShape = img.shape
        # Define a crop around the image center and downsize it to the CNN input
        color = (0, 255, 0)
        thickness = 3
        center = np.array([imgShape[1] / 2, imgShape[0] / 2])  # (x, y)
        d = np.array([_WIDTH / 2, _HEIGHT / 2])  # half-extent in (x, y) order
        p1 = tuple((center - d).astype(int))
        p1 = (max(p1[0], 0), max(p1[1], 0))
        p2 = tuple((center + d).astype(int))
        # clamp x by image width (shape[1]) and y by image height (shape[0])
        p2 = (min(p2[0], imgShape[1] - 1), min(p2[1], imgShape[0] - 1))
        cv2.rectangle(img, p1, p2, color, thickness)
        crop = cv2.resize(img[p1[1]:p2[1], p1[0]:p2[0]], (_WIDTH, _HEIGHT))

        before = datetime.now()
        interpreter.set_tensor(input_details[0]['index'], crop)
        interpreter.invoke()
        # dequantize the raw output: scale * quantized_value
        pred_age = (output_details[0]['quantization'][0] *
                    interpreter.get_tensor(output_details[0]['index'])[0][0])
        pred_age = int(round(pred_age))
        pred_gender = interpreter.get_tensor(output_details[1]['index'])[0]
        dt = datetime.now() - before

        gender = 'male'
        if pred_gender < 1:
            gender = 'female'
        results = 'Age {}, Gender {}, '.format(pred_age, pred_gender)

        resultsDisplay = '{:.3f}s Age {}, Gender {}'.format(
            dt.total_seconds(), pred_age, gender)

        print(results)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, resultsDisplay, (10, 25), font, 1, (0, 255, 0), 2,
                    cv2.LINE_AA)
        # encode as a jpeg image and return it
        frame = cv2.imencode('.jpg', img)[1].tobytes()

        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
Example #26
    def __init__(self):

        # pipeline_config = 'efficientdet_d0_coco17_tpu-32/pipeline.config'
        # model_dir = 'efficientdet_d0_coco17_tpu-32/checkpoint/'

        # # Load pipeline config and build a detection model
        # configs = config_util.get_configs_from_pipeline_file(pipeline_config)
        # model_config = configs['model']
        # detection_model = model_builder.build(
        #       model_config=model_config, is_training=False)

        # # Restore checkpoint
        # ckpt = tf.compat.v2.train.Checkpoint(
        #       model=detection_model)
        # ckpt.restore(os.path.join(model_dir, 'ckpt-0')).expect_partial()

        # # Retrieving labels...
        # label_map_path = 'efficientdet_d0_coco17_tpu-32/mscoco_label_map.pbtxt'  #configs['eval_input_config'].label_map_path
        # label_map = label_map_util.load_labelmap(label_map_path)
        # categories = label_map_util.convert_label_map_to_categories(
        #     label_map,
        #     max_num_classes=label_map_util.get_max_label_map_index(label_map),
        #     use_display_name=True)
        # self.category_index = label_map_util.create_category_index(categories)
        # label_map_dict = label_map_util.get_label_map_dict(label_map, use_display_name=True)

        # Load the TFLite model and allocate tensors.
        tflite_model_path = os.path.join(dir_path, "../model.tflite")
        # print(tflite_model_path)
        # self.interpreter = tf.lite.Interpreter(model_path="/home/davide/catkin_ws/src/object_detection_pico/model.tflite")
        self.interpreter = tflite.Interpreter(
            model_path=tflite_model_path
        )  # USE THIS IF ONLY THE TFLITE INTERPRETER IS INSTALLED
        self.interpreter.allocate_tensors()

        # Get input and output tensors.
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

        rospy.set_param('nn_input_size', [
            float(self.input_details[0]['shape'][1]),
            float(self.input_details[0]['shape'][2])
        ])
        # rospy.set_param('nn_input_height')

        # self.feat_extr = ssd_mobilenet_v2_feature_extractor.SSDMobileNetV2FeatureExtractor(is_training=False)

        self.bridge = CvBridge()

        self.pudDetBox = rospy.Publisher("DetectionBoxes",
                                         ObjectDetectionBoxes,
                                         queue_size=1)
        self.pubDepthImg = rospy.Publisher("/panoramicd_img",
                                           img,
                                           queue_size=1)
        self.boxes_msg = ObjectDetectionBoxes()
Example #27
    def __init__(self):
        self.interpreter = tflite.Interpreter("tf_model_file.tflite")
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

        # NxHxWxC, H:1, W:2
        self.height = self.input_details[0]['shape'][1]
        self.width = self.input_details[0]['shape'][2]
        self.labels = self.load_labels("class_labels.txt")
Example #28
    def getInterpreter(self):
        if self.interpreter is not None:
            return self.interpreter
        interpreter = tflite.Interpreter(model_path=self.model_path,
                                         num_threads=self.NUM_LITE_THREADS)
        interpreter.allocate_tensors()
        self.input_details = interpreter.get_input_details()
        self.output_details = interpreter.get_output_details()
        self.interpreter = interpreter
        return interpreter
Example #29
    def __init__(self, model_filename, labels):
        super(TFLiteObjectDetection, self).__init__(labels)
        self.interpreter = tflite.Interpreter(
            model_path=model_filename,
            experimental_delegates=[
                tflite.load_delegate(_EDGETPU_SHARED_LIB, {})
            ])
        self.interpreter.allocate_tensors()
        self.input_index = self.interpreter.get_input_details()[0]['index']
        self.output_index = self.interpreter.get_output_details()[0]['index']
Example #30
    def __init__(self, ellipse_model, point_model):
        # one interpreter per Edge TPU so both models can run concurrently
        self.interpreter1 = tflite.Interpreter(
            ellipse_model,
            experimental_delegates=[
                tflite.load_delegate('libedgetpu.so.1', {"device": "usb:0"})
            ])
        self.interpreter2 = tflite.Interpreter(
            point_model,
            experimental_delegates=[
                tflite.load_delegate('libedgetpu.so.1', {"device": "usb:1"})
            ])
        self.interpreter1.allocate_tensors()
        self.interpreter2.allocate_tensors()
        self.input1 = self.interpreter1.get_input_details()
        self.output1 = self.interpreter1.get_output_details()
        self.input2 = self.interpreter2.get_input_details()
        self.output2 = self.interpreter2.get_output_details()
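
A hypothetical companion method showing how the two interpreters might be driven (the name and the single-output assumption are mine):

    def infer(self, frame):
        # Ellipse model on the TPU at usb:0 ...
        self.interpreter1.set_tensor(self.input1[0]['index'], frame)
        self.interpreter1.invoke()
        ellipse = self.interpreter1.get_tensor(self.output1[0]['index'])
        # ... point model on the TPU at usb:1.
        self.interpreter2.set_tensor(self.input2[0]['index'], frame)
        self.interpreter2.invoke()
        points = self.interpreter2.get_tensor(self.output2[0]['index'])
        return ellipse, points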