class SSDMobileNet:
    def __init__(self, path_to_ckpt):
        # Load the TensorFlow Lite model.
        self._interpreter = Interpreter(model_path=path_to_ckpt)
        self._interpreter.allocate_tensors()

        # Get model details
        self._input_details = self._interpreter.get_input_details()
        self._output_details = self._interpreter.get_output_details()
        self.height = self._input_details[0]['shape'][1]
        self.width = self._input_details[0]['shape'][2]

        self.floating_model = (self._input_details[0]['dtype'] == np.float32)

    def infer(self, input_data):
        # Perform the actual detection by running the model with the image as input
        self._interpreter.set_tensor(self._input_details[0]['index'],
                                     input_data)
        self._interpreter.invoke()

        # Retrieve detection results
        # Bounding box coordinates, class indices, and confidence scores of
        # the detected objects
        boxes = self._interpreter.get_tensor(self._output_details[0]['index'])[0]
        classes = self._interpreter.get_tensor(self._output_details[1]['index'])[0]
        scores = self._interpreter.get_tensor(self._output_details[2]['index'])[0]

        return boxes, classes, scores
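
# A minimal usage sketch for the class above, assuming OpenCV is available;
# the model path, input image, and the 127.5 normalization constants are
# illustrative assumptions, not part of the original snippet.
import cv2
import numpy as np

detector = SSDMobileNet('detect.tflite')  # hypothetical model path
frame = cv2.imread('input.jpg')           # any BGR image
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
resized = cv2.resize(rgb, (detector.width, detector.height))
input_data = np.expand_dims(resized, axis=0)
if detector.floating_model:
    # Float models conventionally expect inputs normalized to [-1, 1]
    input_data = (np.float32(input_data) - 127.5) / 127.5
boxes, classes, scores = detector.infer(input_data)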
Example 2
def main():

    # Gather data from text file
    data = np.loadtxt('test1.txt')
    data = np.float32([data])

    # Setup interpreter for inference
    interpreter = Interpreter(model_path="my_tflite_model_4.tflite")
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    start = time.time()

    results = []
    for i in range(len(data[0])):
        input_data = data[0][[i]]  # keep the batch dimension
        interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()
        prediction = np.argmax(interpreter.get_tensor(output_details[0]['index']))
        results.append(prediction)
        print(prediction)

    # Average inference time per sample
    duration = time.time() - start
    print(duration / len(data[0]))
    return results
Example 3
class ObjectDetectorLite:
    def __init__(self, model_path='detect.tflite', num_threads=12):
        try:
            self.interpreter = Interpreter(model_path=model_path,
                                           num_threads=num_threads)
        except TypeError:
            # Fall back for runtimes whose Interpreter does not accept
            # num_threads in the constructor
            self.interpreter = tf.lite.Interpreter(model_path=model_path)
            self.interpreter.set_num_threads(num_threads)
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

    def _boxes_coordinates(self,
                           image,
                           boxes,
                           classes,
                           scores,
                           max_boxes_to_draw=20,
                           min_score_thresh=.5):

        if not max_boxes_to_draw:
            max_boxes_to_draw = boxes.shape[0]
        number_boxes = min(max_boxes_to_draw, boxes.shape[0])
        person_boxes = []
        for i in range(number_boxes):
            if scores is None or scores[i] > min_score_thresh:
                box = tuple(boxes[i].tolist())
                ymin, xmin, ymax, xmax = box
                _, im_height, im_width, _ = image.shape
                left, right, top, bottom = [
                    int(z) for z in (xmin * im_width, xmax * im_width,
                                     ymin * im_height, ymax * im_height)
                ]
                person_boxes.append([(left, top), (right, bottom), scores[i],
                                     LABELS[classes[i]]])
        return person_boxes

    def detect(self, image, threshold=0.1):

        # run model
        self.interpreter.set_tensor(self.input_details[0]['index'], image)
        start_time = time.time()
        self.interpreter.invoke()
        stop_time = time.time()
        print("time: ", stop_time - start_time)

        # get results
        boxes = self.interpreter.get_tensor(self.output_details[0]['index'])
        classes = self.interpreter.get_tensor(self.output_details[1]['index'])
        scores = self.interpreter.get_tensor(self.output_details[2]['index'])
        num = self.interpreter.get_tensor(self.output_details[3]['index'])

        # Find detected boxes coordinates
        # Find detected boxes coordinates
        return self._boxes_coordinates(image,
                                       np.squeeze(boxes[0]),
                                       np.squeeze(classes[0] + 1).astype(np.int32),
                                       np.squeeze(scores[0]),
                                       min_score_thresh=threshold)
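
# A hedged usage sketch: LABELS must be defined at module level (the class
# references it in _boxes_coordinates); the label map, model path, image, and
# the 300x300 input size are placeholders, not from the original snippet.
import cv2
import numpy as np

LABELS = {1: 'person'}  # placeholder label map
detector = ObjectDetectorLite('detect.tflite')
img = cv2.cvtColor(cv2.imread('input.jpg'), cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (300, 300))
batch = np.expand_dims(img, axis=0)
for (left, top), (right, bottom), score, label in detector.detect(batch, threshold=0.5):
    print(label, score, (left, top, right, bottom))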
Example 4
def main():

    interpreter = Interpreter(
        model_path, experimental_delegates=[load_delegate("libedgetpu.so.1")])

    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    input_queue = input_manager.get_input_queue()
    output_queue = output_manager.get_output_queue()

    print(
        '------------------------------------------------------------------------------------------'
    )
    print(
        'Started Inference server, waiting for incoming requests ... (send \'stop\' to kill server)'
    )
    print(
        '------------------------------------------------------------------------------------------'
    )

    while True:
        data = input_queue.get()
        print('received data with type', type(data))

        if isinstance(data, str) and data == "stop":
            break

        if isinstance(data, np.ndarray):

            input_image = np.expand_dims(data, axis=0)
            interpreter.set_tensor(input_details[0]["index"], input_image)
            t_begin = time.perf_counter()
            interpreter.invoke()
            inference_time = time.perf_counter() - t_begin
            boxes = interpreter.get_tensor(output_details[0]['index'])
            labels = interpreter.get_tensor(output_details[1]['index'])
            scores = interpreter.get_tensor(output_details[2]['index'])
            num = interpreter.get_tensor(output_details[3]['index'])
            print('number of detected objects: ', num, ', done in ',
                  inference_time, ' seconds')
            output_queue.put({
                "num": num,
                "boxes": boxes,
                "labels": labels,
                "scores": scores
            })

    # End while

    input_manager.shutdown()
    output_manager.shutdown()
Example 5
class ResNet50:
    def __init__(self, frozenGraphFilename, device='cpu'):
        self.interpreter = Interpreter(frozenGraphFilename)
        self.__load_graph(device)
        self.__init_predictor()

    def __load_graph(self, device):
        # Allocate tensors for the TFLite interpreter (the device argument is
        # currently unused)
        self.interpreter.allocate_tensors()

    def __init_predictor(self):
        # obtaining the input-output shapes and types
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

    def predict(self, images, params, top_n=5):
        x_matrix = np.array(images)
        if params[const.PRECISION] == const.FP32:
            x_matrix = np.array(images, dtype=np.float32)

        self.interpreter.set_tensor(self.input_details[0]['index'], x_matrix)
        self.interpreter.invoke()
        predictions = self.interpreter.get_tensor(
            self.output_details[0]['index'])
        results = {'predictions': []}
        for prediction in predictions:
            # Class indices sorted by descending score
            ordered = prediction.argsort()[::-1]
            # Convert numpy.int64 to int for JSON serialization later
            topn = [int(j) for j in ordered[:top_n]]
            results['predictions'].append(topn)
        return results

    def predict_runtime(self, images, params, top_n=5):
        x_matrix = np.array(images)
        if params[const.PRECISION] == const.FP32:
            x_matrix = np.array(images, dtype=np.float32)

        start = time.time()
        self.interpreter.set_tensor(self.input_details[0]['index'], x_matrix)
        self.interpreter.invoke()
        predictions = self.interpreter.get_tensor(
            self.output_details[0]['index'])
        duration = time.time() - start
        return duration
Example 6
def identifyAudio(path):
    interpreter = Interpreter(model_path)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    signal, fs = librosa.load(path, sr=8000, mono=True)

    mfccs = extract_segments(signal, 10)
    pred = []
    confidence = []

    labels = ['ripe', 'unripe', 'mid-ripe']

    for mfcc in mfccs:
        # Make prediction input of shape (1, 13, 13, 1)
        in_tensor = np.float32(mfcc.reshape(1, mfcc.shape[0], mfcc.shape[1], 1))

        interpreter.set_tensor(input_details[0]['index'], in_tensor)
        interpreter.invoke()

        output_data = interpreter.get_tensor(output_details[0]['index'])
        val = output_data[0]

        best = int(np.argmax(val))
        if val[best] > 0.5:  # confidence threshold
            pred.append(labels[best])
            confidence.append(val[best])

    return pred, confidence
Example 7
def main():

    data = pd.read_csv('fbdh1.csv')
    data.drop(['Flow'], axis=1, inplace=True)
    scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
    # fit_transform returns a plain ndarray, so no .values access is needed
    data = np.float32(scaler.fit_transform(data))

    interpreter = Interpreter(model_path="my_tflite_model_4.tflite")
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    predictions = []

    for i in range(len(data)):
        # Keep the batch dimension; the data is already scaled above
        input_data = data[[i]]
        interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()
        predictions.append(interpreter.get_tensor(output_details[0]['index']))

    return predictions
Example 8
    def run_inference(self, model, rtol=1e-04, atol=1e-05):
        # Set batch dimension to 1
        input_shape = (1, ) + model.input_shape[1:]
        # Generate data
        data = np.random.random_sample(input_shape).astype(np.float32)

        # Get result in tensorflow
        tf_result = model.predict(data)

        # Convert to tflite
        converter = lqce.ModelConverter(model)
        tflite_model = converter.convert()

        # Setup tflite
        interpreter = Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()

        tflite_input_type = input_details[0]["dtype"]
        tflite_input_shape = input_details[0]["shape"]

        self.assertAllEqual(tflite_input_shape, data.shape)
        self.assertEqual(tflite_input_type, data.dtype)

        interpreter.set_tensor(input_details[0]["index"], data)

        # Run tflite
        interpreter.invoke()

        # Get tflite result
        tflite_result = interpreter.get_tensor(output_details[0]["index"])

        self.assertAllClose(tf_result, tflite_result, rtol=rtol, atol=atol)
Example 9
class NetworkExecutor(object):
    def __init__(self, model_file):

        self.interpreter = Interpreter(model_file, num_threads=3)
        self.interpreter.allocate_tensors()
        input_details = self.interpreter.get_input_details()[0]
        _, self.input_height, self.input_width, _ = input_details['shape']
        self.tensor_index = input_details['index']

    def get_output_tensors(self):

        output_details = self.interpreter.get_output_details()
        tensor_list = []

        for output in output_details:
            tensor = np.squeeze(self.interpreter.get_tensor(output['index']))
            tensor_list.append(tensor)

        return tensor_list

    def run(self, image):
        img = image
        if image.shape[:2] != (self.input_height, self.input_width):
            img = cv2.resize(image, (self.input_width, self.input_height))
        img = preprocess(img)
        self.interpreter.set_tensor(self.tensor_index, img)
        self.interpreter.invoke()
        return self.get_output_tensors()
Example 10
def main():

    data = np.loadtxt('test1.txt')
    data = np.float32([data])

    interpreter = Interpreter(model_path="my_tflite_model_4.tflite")
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    start = time.time()
    for i in range(0, len(data[0])):
        input_data = data[0][[i]]
        interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()
        print(interpreter.get_tensor(output_details[0]['index']))

    duration = time.time() - start
    print(duration / len(data[0]))
Example 11
class TFLiteImageEncoder(object):
    def __init__(self,
                 tflite_filename,
                 input_name=None,
                 output_name=None,
                 num_threads=1):
        self.interpreter = Interpreter(model_path=tflite_filename,
                                       num_threads=num_threads)
        self.interpreter.allocate_tensors()
        self.input_detail = self.interpreter.get_input_details()[0]
        self.output_detail = self.interpreter.get_output_details()[0]
        self.input_tensor_index = self.input_detail['index']
        self.output_tensor_index = self.output_detail['index']
        self.image_shape = self.input_detail['shape'][1:]
        self.height, self.width, _ = self.image_shape.tolist()
        self.feature_dim = self.output_detail['shape'][1]
        self.max_batch_size = self.output_detail['shape'][0]

    def __call__(self, data_in, batch_size=1):
        out = np.zeros((len(data_in), self.feature_dim), np.float32)

        def _internal_fn(patches):
            patches2 = np.array(patches).astype(np.float32)
            self.interpreter.set_tensor(self.input_tensor_index, patches2)
            self.interpreter.invoke()
            return self.interpreter.get_tensor(self.output_tensor_index)

        if self.max_batch_size and batch_size > self.max_batch_size:
            batch_size = self.max_batch_size

        _run_in_batches(_internal_fn, data_in, out, batch_size)
        return out
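
# The helper _run_in_batches is referenced above but not shown; a minimal
# sketch consistent with how it is called here (function, input array, output
# array, batch size). This is an assumption, not the original helper.
def _run_in_batches(fn, data_in, out, batch_size):
    # Feed data_in to fn in fixed-size batches, writing results into out.
    end = 0
    for start in range(0, len(out) - batch_size + 1, batch_size):
        end = start + batch_size
        out[start:end] = fn(data_in[start:end])
    if end < len(out):
        # Remainder batch (only valid if the model accepts a smaller batch)
        out[end:] = fn(data_in[end:])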
Example 12
def main():
    args = get_cmd()
    if args.edgetpu:
        interpreter = Interpreter(args.model, experimental_delegates=[
                                  load_delegate('libedgetpu.so.1.0')])
    else:
        interpreter = Interpreter(args.model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    width = input_details[0]['shape'][2]
    height = input_details[0]['shape'][1]
    labels = load_label(args.labels)
    cap = cv2.VideoCapture(args.source)
    image_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    image_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb, (width, height))
        input_data = numpy.expand_dims(frame_resized, axis=0)
        interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()
        boxes = interpreter.get_tensor(output_details[0]['index'])[0]
        classes = interpreter.get_tensor(output_details[1]['index'])[0]
        scores = interpreter.get_tensor(output_details[2]['index'])[0]
        for i in range(len(scores)):
            if args.threshold < scores[i] <= 1.0:
                ymin = int(max(1, (boxes[i][0] * image_height)))
                xmin = int(max(1, (boxes[i][1] * image_width)))
                ymax = int(min(image_height, (boxes[i][2] * image_height)))
                xmax = int(min(image_width, (boxes[i][3] * image_width)))
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (10, 255, 0), 4)
                object_name = labels[int(classes[i])]
                label = '%s: %d%%' % (object_name, int(scores[i]*100))
                labelSize, baseLine = cv2.getTextSize(
                    label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
                label_ymin = max(ymin, labelSize[1] + 10)
                cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (
                    xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)
                cv2.putText(frame, label, (xmin, label_ymin-7),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
        cv2.imshow('Object detector', frame)
        if cv2.waitKey(1) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
Example 13
class Classifier:
    """
    Perform image classification with the given model. The model is a .h5 file
    which if the classifier can not find it at the path it will download it
    from neuralet repository automatically.
    :param config: Is a Config instance which provides necessary parameters.
    """

    def __init__(self, config):
        self.config = config
        self.model_name = "OFMClassifier_edgetpu.tflite"
        self.model_path = 'libs/classifiers/edgetpu/' + self.model_name
        # Frames Per Second
        self.fps = None
        if not os.path.isfile(self.model_path):
            url= "https://raw.githubusercontent.com/neuralet/neuralet-models/master/edge-tpu/OFMClassifier/OFMClassifier_edgetpu.tflite"
            print("model does not exist under: ", self.model_path, "downloading from ", url)
            wget.download(url, self.model_path)

        # Load TFLite model and allocate tensors
        self.interpreter = Interpreter(self.model_path, experimental_delegates=[load_delegate("libedgetpu.so.1")])
        self.interpreter.allocate_tensors()
        # Get the model input and output tensor details
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details() 


    def inference(self, resized_rgb_image) -> list:
        """
        Sets the input tensor to the input image and runs inference.
        The interpreter instance provides the corresponding class id output,
        which is used to build the result.
        Args:
            resized_rgb_image: Array of images with shape (no_images, img_height, img_width, channels)
        Returns:
            result: List of class ids, one per input image. ex: [0, 0, 1, 1, 0]
            scores: The classification confidence for each image. ex: [.99, .75, .80, 1.0]
        """
        if np.shape(resized_rgb_image)[0] == 0:
            return [], []
        resized_rgb_image = (resized_rgb_image * 255).astype("uint8")
        result = []
        net_results = []
        for img in resized_rgb_image:
            img = np.expand_dims(img, axis=0)
            self.interpreter.set_tensor(self.input_details[0]["index"], img)
            t_begin = time.perf_counter()
            self.interpreter.invoke()
            inference_time = time.perf_counter() - t_begin  # Seconds
            self.fps = convert_infr_time_to_fps(inference_time)
            net_output = self.interpreter.get_tensor(self.output_details[0]['index'])[0]
            net_results.append(net_output)
            result.append(np.argmax(net_output))  # class id

        # TODO: vectorize this instead of looping; scores are currently a
        # placeholder of 1 for every image
        scores = [1] * len(net_results)

        return result, scores
Example 14
class ImageClassifier:
    def __init__(self, model_filename='model.tflite'):
        # load the .tflite model with the TF Lite runtime interpreter
        self.interpreter = Interpreter(model_filename)
        self.interpreter.allocate_tensors()

        # get a handle to the input tensor details
        input_details = self.interpreter.get_input_details()[0]

        # get the input tensor index
        self.input_index = input_details['index']

        # get the shape of the input tensor, so we can rescale the
        # input image to the appropriate size when making predictions
        input_shape = input_details['shape']
        self._input_height = input_shape[1]
        self._input_width = input_shape[2]

        # get a handle to the output tensor details
        output_details = self.interpreter.get_output_details()[0]

        # get the output tensor index
        self.output_index = output_details['index']

    def predict(self, image):
        # convert the image to grayscale and resize
        grayscale_image = image.convert('L').resize(
            (self._input_width, self._input_height))

        # convert the image to a numpy array; getdata() is row-major, so the
        # reshape order is (batch, height, width)
        input_data = np.asarray(grayscale_image.getdata(), dtype=np.uint8).reshape(
            (1, self._input_height, self._input_width))

        # assign the numpy array value to the input tensor
        self.interpreter.set_tensor(self.input_index, input_data)

        # invoke the operation
        self.interpreter.invoke()

        # get output tensor value
        output = self.interpreter.get_tensor(self.output_index)

        # return the prediction, there was only one input
        return output[0]

    def classify(self, image):
        # get the prediction; the output will be an array of 10 scores
        prediction = self.predict(image)

        # find the index with the largest value
        classification = int(np.argmax(prediction))

        # get the score for the largest index
        score = float(prediction[classification])

        return classification, score
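
# A hedged usage sketch, assuming an MNIST-style grayscale digit model (the
# 10-way output suggests this); the image path is a placeholder.
from PIL import Image

classifier = ImageClassifier('model.tflite')
image = Image.open('digit.png')
classification, score = classifier.classify(image)
print('class %d, score %.3f' % (classification, score))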
Example 15
class Detector(object):
    def __init__(self,
                 model_path=DEFAULT_MODEL_PATH,
                 labels_path=DEFAULT_LABEL_PATH):

        self.labels = self.load_labels(labels_path)
        self.interpreter = Interpreter(model_path)
        self.interpreter.allocate_tensors()

    def set_input_tensor(self, image):
        tensor_index = self.interpreter.get_input_details()[0]["index"]
        input_tensor = self.interpreter.tensor(tensor_index)()[0]
        input_tensor[:, :] = image

    def get_output_tensor(self, index):
        output_details = self.interpreter.get_output_details()[index]
        tensor = np.squeeze(
            self.interpreter.get_tensor(output_details["index"]))
        return tensor

    def detect_objects(self, image, threshold=0.5):
        self.set_input_tensor(image)
        self.interpreter.invoke()

        # Get all output details
        boxes = self.get_output_tensor(0)
        classes = self.get_output_tensor(1)
        scores = self.get_output_tensor(2)
        count = int(self.get_output_tensor(3))
        results = []
        for i in range(count):
            if scores[i] >= threshold:
                result = {
                    'bounding_box': boxes[i],
                    'class_id': int(classes[i]),
                    'class': self.labels[int(classes[i])],
                    'score': scores[i]
                }
                results.append(result)
        print(results)
        return results

    def load_labels(self, path):
        """Loads the labels file. Supports files with or without index numbers."""
        with open(path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
            labels = {}
            for row_number, content in enumerate(lines):
                pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
                if len(pair) == 2 and pair[0].strip().isdigit():
                    labels[int(pair[0])] = pair[1].strip()
                else:
                    labels[row_number] = pair[0].strip()
            return labels
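
# A minimal driver for the detector above; the image path is a placeholder
# and the 300x300 input size is an assumption typical of SSD detect models.
import cv2

detector = Detector()  # uses DEFAULT_MODEL_PATH / DEFAULT_LABEL_PATH
img = cv2.cvtColor(cv2.imread('input.jpg'), cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (300, 300))
for det in detector.detect_objects(img, threshold=0.5):
    print(det['class'], det['score'])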
Example 16
class Model:
    def __init__(self, path_to_model):
        self.interpreter = Interpreter(model_path=path_to_model)
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        self.height = self.input_details[0]['shape'][1]
        self.width = self.input_details[0]['shape'][2]
    
    def resize(self, frame):
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb, (self.width, self.height))
        input_data = np.expand_dims(frame_resized, axis=0)
        return input_data
    
    def detect(self, input_data):
        self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
        self.interpreter.invoke()
        # Bounding box coordinates, class indices, and confidence scores of
        # the detected objects
        self.boxes = self.interpreter.get_tensor(self.output_details[0]['index'])[0]
        self.classes = self.interpreter.get_tensor(self.output_details[1]['index'])[0]
        self.scores = self.interpreter.get_tensor(self.output_details[2]['index'])[0]
    
    def get_persons(self):
        persons = []
        for i in range(len(self.scores)):
            if min_det_threshold < self.scores[i] <= 1.0:
                # Look up the object name from the "labels" array using the
                # class index
                object_name = labels[int(self.classes[i])]
                if object_name == "person":
                    # Get bounding box coordinates. The interpreter can return
                    # coordinates outside the image dimensions, so clamp them
                    # with max() and min().
                    ymin = int(max(1, (self.boxes[i][0] * imH)))    # top y
                    xmin = int(max(1, (self.boxes[i][1] * imW)))    # left x
                    ymax = int(min(imH, (self.boxes[i][2] * imH)))  # bottom y
                    xmax = int(min(imW, (self.boxes[i][3] * imW)))  # right x

                    persons.append(np.array([ymin, xmin, ymax, xmax]))
        return persons
    
    def get_output(self, frame):
        self.detect(self.resize(frame))
        return self.get_persons()
Example 17
class Detector(object):
    def __init__(self, path_to_model, threshold):
        self.threshold = threshold
        self.__camera = picamera.PiCamera(resolution=(640, 480), framerate=30)
        self.__camera.start_preview()

        self.__is_cat_detected = False

        self.__stream = io.BytesIO()

        self.__interpreter = Interpreter(path_to_model)
        self.__interpreter.allocate_tensors()
        input_shape = self.__interpreter.get_input_details()[0]['shape']
        self.__img_input_size = input_shape[1:3]

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.__camera.close()

    def check(self):
        result = True
        try:
            self.__stream.seek(0)
            self.__camera.capture(self.__stream, 'jpeg')
            # Pillow 10 removed ANTIALIAS; LANCZOS is the same filter
            image = Image.open(self.__stream).convert('RGB').resize(
                self.__img_input_size, Image.LANCZOS)

            self.__is_cat_detected = self.__is_cat(image)

            self.__stream.seek(0)
            self.__stream.truncate()
        except Exception as err:
            print(err)
            result = False
        finally:
            self.__camera.stop_preview()
        return result

    def is_detect(self):
        return self.__is_cat_detected

    def __is_cat(self, image):
        tensor_index = self.__interpreter.get_input_details()[0]['index']
        self.__interpreter.tensor(tensor_index)()[0][:, :] = image

        self.__interpreter.invoke()
        output_details = self.__interpreter.get_output_details()[0]
        output = np.squeeze(
            self.__interpreter.get_tensor(output_details['index']))

        return (output < self.threshold) if self.threshold < 0 else (output > self.threshold)
Example 18
class Classifier(object):
    def __init__(self, model_path, label_path):
        self.interpreter = Interpreter(model_path=model_path)
        self.interpreter.allocate_tensors()
        self.labels = load_labels(label_path)

        self.input_details = self.interpreter.get_input_details()[0]
        self.input_scale, self.input_zero_point = self.input_details['quantization']
        self.input_tensor = self.input_details['index']

        input_shape = self.input_details['shape']
        self.input_size = tuple(input_shape[:2] if len(input_shape) == 3
                                else input_shape[1:3])

        self.output_details = self.interpreter.get_output_details()[0]
        self.output_scale, self.output_zero_point = self.output_details['quantization']
        self.output_tensor = self.output_details['index']

        logger.info(f"Loaded model {model_path}")
        logger.info(
            f"[Input] dtype: {self.input_details['dtype']}, scale: {self.input_scale}, "
            f"zero_point: {self.input_zero_point}")
        logger.info(
            f"[Output] dtype: {self.output_details['dtype']}, scale: {self.output_scale}, "
            f"zero_point: {self.output_zero_point}")

    def classify(self, image, top=5):
        if len(image.shape) < 4:
            image = np.expand_dims(image, axis=0)

        if self.input_details['dtype'] == np.uint8:
            # Quantize, then cast so set_tensor sees the expected uint8 dtype
            image = (image / self.input_scale + self.input_zero_point).astype(np.uint8)

        start_time = time.time()

        self.interpreter.set_tensor(self.input_tensor, image)
        self.interpreter.invoke()
        output = self.interpreter.get_tensor(self.output_tensor)[0]

        elapsed = time.time() - start_time
        logger.info("Elapsed {:.6f}s".format(elapsed))

        if self.output_details['dtype'] == np.uint8:
            output = self.output_scale * (output - self.output_zero_point)

        top_k_idx = np.argsort(output)[::-1][:top]
        result = OrderedDict()
        for idx in top_k_idx:
            result[self.labels[idx]] = output[idx]

        return result
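
# The scale/zero-point handling above is TFLite's standard affine
# quantization, real = scale * (q - zero_point). A small worked sketch with
# made-up parameters:
import numpy as np

scale, zero_point = 0.0078125, 128  # hypothetical quantization parameters
real = np.float32([0.5, -0.25])
q = np.uint8(np.round(real / scale + zero_point))  # quantize -> [192, 96]
back = scale * (np.float32(q) - zero_point)        # dequantize -> [0.5, -0.25]
print(q, back)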
Example 19
def handler(event, context):
    if not event:
        return error('Event was None')

    # Load model.txt
    s3_client = boto3.client('s3')
    s3_client.download_file(bucket_name, model_name_file, model_name_file_path)
    with open(model_name_file_path, 'r') as f:
        model_name = f.read().strip()
    model_path = '/tmp/' + model_name

    if not os.path.exists(model_path):
        s3_client.download_file(bucket_name, model_name, model_path)
        download_model()
        print('Downloaded model')

    # Create interpreter
    interpreter = Interpreter(model_path=model_path)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    print('Loaded model')

    # Extract image
    try:
        image = get_image(event)
        print('Image loaded')
    except Exception as e:
        return error('Failed to load image: ' + str(e), event)

    # Input data and predict
    interpreter.set_tensor(input_details[0]['index'], image)
    interpreter.invoke()
    prediction_data = interpreter.get_tensor(output_details[0]['index'])
    prediction_idx = np.argmax(prediction_data[0])
    predicted_label = labels[prediction_idx]
    print('Prediction complete')

    return {
        'statusCode': 200,
        'body': json.dumps({
            'label': predicted_label,
            'message': 'Classified as ' + predicted_label,
            'data': prediction_data[0].tolist()
        }),
        'headers': {
            'Content-Type': 'application/json'
        }
    }
Example 20
def compute(model_path):
    now = time.monotonic()
    intp = Interpreter(model_path)
    intp.allocate_tensors()
    x = intp.tensor(intp.get_input_details()[0]['index'])
    iy = intp.get_output_details()[0]['index']
    t1 = time.monotonic() - now

    now = time.monotonic()
    for i in range(WARMUP):
        x().fill(CONSTANT)  # write the input tensor in place
        intp.invoke()
        y = intp.get_tensor(iy)
    t2 = time.monotonic() - now

    now = time.monotonic()
    for i in range(ITER):
        x().fill(CONSTANT)
        intp.invoke()
        y = intp.get_tensor(iy)
    t3 = time.monotonic() - now
    return t1, t2 / float(WARMUP), t3 / float(ITER)
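
# A hedged driver; WARMUP, ITER, and CONSTANT are module-level globals in the
# original, so the values here are assumptions.
WARMUP, ITER, CONSTANT = 5, 50, 0.5

t_load, t_warm, t_steady = compute('model.tflite')  # hypothetical model path
print('load+allocate: %.4fs, warm-up: %.4fs/run, steady state: %.4fs/run'
      % (t_load, t_warm, t_steady))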
Example 21
class Classifier:
    def __init__(self):
        model_name = 'coco_ssd_mobilenet_v1_1.0_quant_2018_06_29/detect.tflite'

        if rasp:
            self.model = Interpreter(model_name)
        else:
            self.model = tf.lite.Interpreter(model_name)

        self.model.allocate_tensors()
        self.model_in = self.model.get_input_details()
        self.model_out = self.model.get_output_details()

        print(self.model_in)
        print(self.model_out)

    def predict(self, img_in):
        img_in = cv.resize(img_in, (300, 300))
        img_out = img_in.copy()

        self.model.set_tensor(self.model_in[0]['index'], [img_in])
        self.model.invoke()

        rects = self.model.get_tensor(self.model_out[0]['index'])
        classes = self.model.get_tensor(self.model_out[1]['index'])
        scores = self.model.get_tensor(self.model_out[2]['index'])

        for index, score in enumerate(scores[0]):
            # Class 0 corresponds to 'person' in the COCO label map
            if classes[0][index] == 0 and score > 0.5:
                img_out = draw_rect(img_in, rects[0][index])

        return img_out
Example 22
class ImageClassifier:
    def __init__(self):
        try:
            self.interpreter = Interpreter(model_path='model.tflite')
            self.interpreter.allocate_tensors()
            _, self.height, self.width, _ = self.interpreter.get_input_details(
            )[0]['shape']
            print('model successfully loaded')
        except Exception as e:
            print("error:", e)
            return

    def get_output_tensor(self, index):
        """Returns the output tensor at the given index."""
        output_details = self.interpreter.get_output_details()[index]
        tensor = np.squeeze(
            self.interpreter.get_tensor(output_details['index']))

        # # If the model is quantized (uint8 data), then dequantize the results
        # if output_details['dtype'] == np.uint8:
        #     scale, zero_point = output_details['quantization']
        #     tensor = scale * (tensor - zero_point)

        return tensor

    def set_input_tensor(self, image):
        tensor_index = self.interpreter.get_input_details()[0]['index']
        input_tensor = self.interpreter.tensor(tensor_index)()[0]
        input_tensor[:, :] = image

    def __call__(self, stream, top_k=1):
        # Pillow 10 removed ANTIALIAS; LANCZOS is the same filter
        image = Image.open(stream).convert('RGB').resize(
            (self.width, self.height), Image.LANCZOS)
        image = np.array(image).astype(np.float32) / 255.0

        self.set_input_tensor(image)
        self.interpreter.invoke()

        classes = self.get_output_tensor(1)
        scores = self.get_output_tensor(2)

        index = np.argmax(scores)
        return int(classes[index]), scores[index]
Example 23
class interpreter():
    def __init__(self, modelPath='laneDirectionModel_Lite.tflite',
                 classLabelPath='classiferAngle.txt'):
        
        self.modelPath = modelPath
        self.classLabelPath = classLabelPath
        self.inputImg = None
        
        self.labels = {}
        self.load_labels()

        self.interpreter = Interpreter(self.modelPath)
        self.interpreter.allocate_tensors()


    def load_labels(self):
        with open(self.classLabelPath, 'r', encoding='utf-8') as f:
            lines = f.readlines()
            for row_number, content in enumerate(lines):
                self.labels[row_number] = int(content)


    def setTensors(self, Img):

        self.inputImg = cvtColor(Img, COLOR_RGB2GRAY)
        self.inputImg = self.inputImg.astype(np.float32)
        self.inputImg = np.expand_dims(self.inputImg, 0)
        self.inputImg = np.expand_dims(self.inputImg, -1)

        input_details = self.interpreter.get_input_details()
        self.interpreter.set_tensor(input_details[0]['index'], self.inputImg)


    def predict(self):

        self.interpreter.invoke()
        outputDetails = self.interpreter.get_output_details()[0]
        output = np.squeeze(self.interpreter.get_tensor(outputDetails['index']))
        predictedClass = int(np.argmax(output))

        # Convert the class label to an angle
        return int(self.labels[predictedClass])
Example 24
def main():

    interpreter = Interpreter(
        model_path, experimental_delegates=[load_delegate("libedgetpu.so.1")])

    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    input_queue = input_manager.get_input_queue()
    output_queue = output_manager.get_output_queue()

    print(
        '------------------------------------------------------------------------------------------'
    )
    print(
        'Started Inference server, waiting for incoming requests ... (send \'stop\' to kill server)'
    )
    print(
        '------------------------------------------------------------------------------------------'
    )

    while True:
        data = input_queue.get()
        print('received data with type', type(data))

        if isinstance(data, str):
            if data == "stop":
                break
            print('data is:', data)

        if isinstance(data, list):
            data = np.array(data, dtype=np.uint8)
            input_image = np.expand_dims(data, axis=0)
            interpreter.set_tensor(input_details[0]["index"], input_image)
            t_begin = time.perf_counter()
            interpreter.invoke()
            inference_time = time.perf_counter() - t_begin
            net_output = interpreter.get_tensor(output_details[0]["index"])
            print('inference output: ', net_output, ', done in ',
                  inference_time, ' seconds')
            output_queue.put(net_output)

    # End while

    input_manager.shutdown()
    output_manager.shutdown()
Example 25
def label_image(model_location, label_location, image_location):
    interpreter = Interpreter(model_path=model_location, num_threads=None)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # check the type of the input tensor
    floating_model = input_details[0]['dtype'] == np.float32

    # NxHxWxC, H:1, W:2
    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]
    img = Image.open(image_location).resize((width, height))

    # add N dim
    input_data = np.expand_dims(img, axis=0)

    if floating_model:
        input_data = (np.float32(input_data) - 127.5) / 127.5

    interpreter.set_tensor(input_details[0]['index'], input_data)

    start_time = time.time()
    interpreter.invoke()
    stop_time = time.time()

    output_data = interpreter.get_tensor(output_details[0]['index'])
    results = np.squeeze(output_data)

    top_k = results.argsort()[-5:][::-1]
    labels = load_labels(label_location)

    prediction = str(labels[top_k[0]])

    if prediction == "Paper" or prediction == "Cardboard":
        return "Paper"
    if prediction == "Metal" or prediction == "Glass" or prediction == "Recyc_Plastic":
        return "Dry"
    if prediction == "Non_Recyc_Plastic" or prediction == "Foil":
        return "General"
    if prediction == "Food":
        return "Organic"
Example 26
def main():

    data = np.loadtxt('test1.txt')
    data = np.float32([data])
    
    interpreter = Interpreter(model_path="Coral.tflite")
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    
    for i in range(0, len(data[0])):
        
        input_data = data[0][[i]]
        interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()
        pred = interpreter.get_tensor(output_details[0]['index'])
        print(pred.argmax())
Example 27
def redwhitepredict():
    # user input
    parser = reqparse.RequestParser()
    parser.add_argument(
        'characteristics',
        type=str,
        required=True,
        help="This is expecting a selection of wine characteristics",
        action='append')
    args = parser.parse_args()
    characteristics = args['characteristics'][0].split(' ')

    #  Load the model

    interpreter = Interpreter(
        model_path="Red_and_White_Analysis/redorwhite_model_trained.tflite")
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Build the input tensor from the user-supplied characteristics
    user_input_characteristics = [float(i) for i in characteristics]
    print(user_input_characteristics)

    input_data = np.array([user_input_characteristics], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)

    interpreter.invoke()

    # The function `get_tensor()` returns a copy of the tensor data.
    # Use `tensor()` in order to get a pointer to the tensor.
    output_data = interpreter.get_tensor(output_details[0]['index'])
    print(output_data)

    # Treat scores of 0.5 and above as "White"; an exact float comparison
    # with 1 would misclassify most outputs
    result_characteristics = "White" if output_data[0][0] >= 0.5 else "Red"
    return jsonify({'wine_selection': result_characteristics})
Example 28
def run_inference(disp, waveform):

    # get spectrogram data
    spectrogram = get_spectrogram(waveform)

    if not len(spectrogram):
        print("Too silent. Skipping...")
        return

    spectrogram1 = np.reshape(
        spectrogram, (-1, spectrogram.shape[0], spectrogram.shape[1], 1))

    if VERBOSE_DEBUG:
        print("spectrogram1: %s, %s, %s" %
              (type(spectrogram1), spectrogram1.dtype, spectrogram1.shape))

    # load TF Lite model
    interpreter = Interpreter('simple_audio_model_numpy.tflite')
    interpreter.allocate_tensors()

    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    input_data = spectrogram1.astype(np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)

    print("running inference...")
    interpreter.invoke()

    output_data = interpreter.get_tensor(output_details[0]['index'])
    yvals = output_data[0]
    commands = ['go', 'down', 'up', 'stop', 'yes', 'left', 'right', 'no']

    if VERBOSE_DEBUG:
        print(output_data[0])
    print(">>> " + commands[np.argmax(output_data[0])].upper())
    disp.show_txt(0, 12, commands[np.argmax(output_data[0])].upper(), True)
Example 29
    def predict(self, data):

        try:
            data = np.array(data, np.float32)
            data = np.expand_dims(data, axis=0)

            data = signal.resample(data, self.sample_rate, axis=1)

            assert data.shape == (1, 16000)
            # Normalize short ints to floats in the range [-1, 1)
            data = np.array(data, np.float32) / 32768.0

            # prepare TFLite interpreter
            with open(os.path.join(self.path_model, self.name_model),
                      'rb') as f:
                model_content = f.read()

            interpreter = Interpreter(model_content=model_content)
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            padded_input = np.zeros((1, 16000), dtype=np.float32)
            padded_input[:, :data.shape[1]] = data

            # set input audio data (by default data at index 0)
            interpreter.set_tensor(input_details[0]['index'],
                                   padded_input.astype(np.float32))

            # run inference
            interpreter.invoke()

            # get output: classification
            out_tflite = interpreter.get_tensor(output_details[0]['index'])

            out_tflite_argmax = np.argmax(out_tflite)

            return out_tflite_argmax

        except AssertionError:
            self.stream = False
            return -1
Example 30
class Classifier:
    def __init__(self, label_file, model_file):
        self.labels = self.load_labels(label_file)
        self.interpreter = Interpreter(model_file)
        self.interpreter.allocate_tensors()
        _, self.height, self.width, _ = self.interpreter.get_input_details(
        )[0]['shape']

    def load_labels(self, path):
        with open(path, 'r') as f:
            return {i: line.strip() for i, line in enumerate(f.readlines())}

    def set_input_tensor(self, image):
        tensor_index = self.interpreter.get_input_details()[0]['index']
        input_tensor = self.interpreter.tensor(tensor_index)()[0]
        input_tensor[:, :] = image

    def classify(self, original_image, top_k=1):
        """Classifies the image and returns it, annotated, as JPEG bytes."""
        start_time = time.time()
        # cv2.resize takes (width, height), not (height, width)
        image = cv2.resize(original_image, (self.width, self.height))
        self.set_input_tensor(image)
        self.interpreter.invoke()
        output_details = self.interpreter.get_output_details()[0]
        output = np.squeeze(
            self.interpreter.get_tensor(output_details['index']))

        # If the model is quantized (uint8 data), then dequantize the results
        if output_details['dtype'] == np.uint8:
            scale, zero_point = output_details['quantization']
            output = scale * (output - zero_point)

        ordered = np.argpartition(-output, top_k)
        results = [(i, output[i]) for i in ordered[:top_k]]

        elapsed_ms = (time.time() - start_time) * 1000
        label_id, prob = results[0]
        text = 'Class: %s Confidence: %.2f  TIME: %.1fms' % (
            self.labels[label_id], prob, elapsed_ms)
        cv2.putText(original_image, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
                    self.width / 400, (0, 0, 255), 2, cv2.LINE_AA)

        return cv2.imencode('.jpg', original_image)[1].tobytes()
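
# A hedged driver for the streaming classifier above; file names are
# placeholders.
import cv2

classifier = Classifier('labels.txt', 'model.tflite')
frame = cv2.imread('input.jpg')
jpeg_bytes = classifier.classify(frame)
with open('annotated.jpg', 'wb') as f:
    f.write(jpeg_bytes)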