Example No. 1
    def __init__(self, model_name: str, label_file: str) -> None:
        """Initialize a pose classification model.

    Args:
      model_name: Name of the TFLite pose classification model.
      label_file: Path of the label list file.
    """

        # Append TFLITE extension to model_name if there's no extension
        _, ext = os.path.splitext(model_name)
        if not ext:
            model_name += '.tflite'

        # Initialize the TFLite model.
        interpreter = Interpreter(model_path=model_name, num_threads=4)
        interpreter.allocate_tensors()

        self._input_index = interpreter.get_input_details()[0]['index']
        self._output_index = interpreter.get_output_details()[0]['index']
        self._interpreter = interpreter

        self.pose_class_names = self._load_labels(label_file)
Example No. 2
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    #parser.add_argument('--model', help='File path of .tflite file.', required=True)
    #parser.add_argument('--labels', help='File path of labels file.', required=True)
    parser.add_argument('--image',
                        help='File path of image file to detect.',
                        required=True)
    args = parser.parse_args()

    labels = load_labels(
        "/home/scripts/models/labels_mobilenet_quant_v1_224.txt")
    interpreter = Interpreter(
        "/home/scripts/models/mobilenet_v1_1.0_224_quant.tflite")
    interpreter.allocate_tensors()
    _, height, width, _ = interpreter.get_input_details()[0]['shape']

    image = Image.open(args.image)
    image = image.convert('RGB').resize((width, height), Image.ANTIALIAS)

    results = classify_image(interpreter, image)
    print("The image contains:", labels[results[0][0]])
Example No. 3
def main():

    interpreter = Interpreter(model_path, experimental_delegates=[load_delegate("libedgetpu.so.1")])

    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    input_queue = input_manager.get_input_queue()
    output_queue = output_manager.get_output_queue()

    print('------------------------------------------------------------------------------------------')
    print('Started Inference server, waiting for incoming requests ... (send \'stop\' to kill server)')
    print('------------------------------------------------------------------------------------------')

    while True:
        data = input_queue.get()
        print('received data with type ', type(data))

        if isinstance(data, str) and data == "stop":
            break

        if isinstance(data, np.ndarray):

            input_image = np.expand_dims(data, axis=0)
            interpreter.set_tensor(input_details[0]["index"], input_image)
            t_begin = time.perf_counter()
            interpreter.invoke()
            inference_time = time.perf_counter() - t_begin
            boxes = interpreter.get_tensor(output_details[0]['index'])
            labels = interpreter.get_tensor(output_details[1]['index'])
            scores = interpreter.get_tensor(output_details[2]['index'])
            num = interpreter.get_tensor(output_details[3]['index'])
            print('number of detected objects: ', num , ', done in ', inference_time, ' seconds' )
            output_queue.put({"num":num, "boxes": boxes, "labels": labels, "scores": scores})

    # End while

    input_manager.shutdown()
    output_manager.shutdown()
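The server above pulls work from queues handed out by input_manager and output_manager, whose setup is not included in the snippet. A hypothetical client, assuming the queues are exposed over the network via multiprocessing.managers.BaseManager; the registered names follow the calls above, while the address, port and authkey are placeholders.

import numpy as np
from multiprocessing.managers import BaseManager

class QueueManager(BaseManager):
    pass

QueueManager.register('get_input_queue')
QueueManager.register('get_output_queue')

manager = QueueManager(address=('127.0.0.1', 50000), authkey=b'tflite')
manager.connect()

input_queue = manager.get_input_queue()
output_queue = manager.get_output_queue()

# Send one dummy 300x300 RGB frame (a typical SSD input size) and read the detections back.
frame = np.zeros((300, 300, 3), dtype=np.uint8)
input_queue.put(frame)
print(output_queue.get())

# Ask the server loop to exit.
input_queue.put("stop")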
Example No. 4
def main():

    model_path = 'data/detect.tflite'
    labels_path = 'data/coco_labels.txt'
    threshold = 0.4

    labels = load_labels(labels_path)
    interpreter = Interpreter(model_path)

    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        camera.start_preview()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)
                start_time = time.monotonic()
                results = detect_objects(interpreter, image, threshold)
                elapsed_ms = (time.monotonic() - start_time) * 1000

                annotator.clear()
                annotate_objects(annotator, results, labels)
                annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                annotator.update()

                stream.seek(0)
                stream.truncate()

        finally:
            camera.stop_preview()
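detect_objects() and its helpers are called throughout these examples but never shown. A minimal sketch, modeled on the usual SSD post-processing layout in which the four outputs are boxes, class ids, scores and detection count; the helper names mirror the calls above, and the output order can differ between models.

import numpy as np

def set_input_tensor(interpreter, image):
    """Copy an already-resized RGB image into the model's input tensor."""
    tensor_index = interpreter.get_input_details()[0]['index']
    input_tensor = interpreter.tensor(tensor_index)()[0]
    input_tensor[:, :] = image

def get_output_tensor(interpreter, index):
    """Return an output tensor with the batch dimension squeezed out."""
    output_details = interpreter.get_output_details()[index]
    return np.squeeze(interpreter.get_tensor(output_details['index']))

def detect_objects(interpreter, image, threshold):
    """Run inference and return detections whose score clears the threshold."""
    set_input_tensor(interpreter, image)
    interpreter.invoke()

    boxes = get_output_tensor(interpreter, 0)
    classes = get_output_tensor(interpreter, 1)
    scores = get_output_tensor(interpreter, 2)
    count = int(get_output_tensor(interpreter, 3))

    results = []
    for i in range(count):
        if scores[i] >= threshold:
            results.append({
                'bounding_box': boxes[i],
                'class_id': classes[i],
                'score': scores[i],
            })
    return results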
Example No. 5
    def __init__(self, model_filename='model.tflite'):
        # load the .tflite model with the TF Lite runtime interpreter
        self.interpreter = Interpreter(model_filename)
        self.interpreter.allocate_tensors()

        # get a handle to the input tensor details
        input_details = self.interpreter.get_input_details()[0]

        # get the input tensor index
        self.input_index = input_details['index']

        # get the shape of the input tensor, so we can rescale the
        # input image to the appropriate size when making predictions
        input_shape = input_details['shape']
        self._input_height = input_shape[1]
        self._input_width = input_shape[2]

        # get a handle to the input tensor details
        output_details = self.interpreter.get_output_details()[0]

        # get the output tensor index
        self.output_index = output_details['index']
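A hypothetical predict() method for the class above, showing how the stored tensor indices and input size could be used; the float32 [0, 1] normalization is an assumption that would need to match the model's training, and numpy is assumed to be imported as np in the module.

    def predict(self, image):
        # resize the incoming PIL image to the model's expected input size
        image = image.convert('RGB').resize((self._input_width, self._input_height))
        input_data = np.expand_dims(np.asarray(image, dtype=np.float32) / 255.0, axis=0)

        self.interpreter.set_tensor(self.input_index, input_data)
        self.interpreter.invoke()
        return self.interpreter.get_tensor(self.output_index)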
Example No. 6
def check_for_bird():
    """ is there a bird at the feeder? """
    labels = load_labels()
    interpreter = Interpreter(path_to_model)
    interpreter.allocate_tensors()
    _, height, width, _ = interpreter.get_input_details()[0]['shape']

    camera.start_preview()
    time.sleep(2)  # give the camera 2 seconds to adjust light balance
    camera.capture(path_to_image)
    image = Image.open(path_to_image)
    results = classify_image(interpreter, image)
    label_id, prob = results[0]
    # print("bird: " + labels[label_id])
    # print("prob: " + str(prob))
    camera.stop_preview()

    if prob > prob_threshold:
        bird = labels[label_id]
        bird = bird[bird.find(",") + 1:]
        prob_pct = str(round(prob * 100, 1)) + "%"
        send_note(bird, prob_pct)
Example No. 7
    def __init__(self):
        print("init")
        # self.MODEL_NAME = "D:\\TP_PROGS\\Projects\\TeProjSahara\\model_objDetec\\modelStart"
        # self.MODEL_NAME = "D:\\TP_PROGS\\Projects\\TeProjSahara\\model_objDetec\\finalObjDet\\new\\new"
        self.MODEL_NAME = "models\\model_objDetec\\modelStart"
        # self.GRAPH_NAME = "detect.tflite"
        self.GRAPH_NAME = "ssd_mobilenet_v1_1_metadata_1.tflite"
        # self.LABELMAP_NAME = "labelmap.txt"
        self.LABELMAP_NAME = "label.txt"
        self.min_conf_threshold = 0.65
        # use_TPU = args.edgetpu
        self.listOfObjDetec = []

        # Path to label map file
        self.PATH_TO_LABELS = os.path.join(self.MODEL_NAME, self.LABELMAP_NAME)
        print(self.PATH_TO_LABELS)
        # Path to .tflite file, which contains the model that is used for object detection
        self.PATH_TO_CKPT = os.path.join(self.MODEL_NAME, self.GRAPH_NAME)

        # Load the label map
        with open(self.PATH_TO_LABELS, 'r') as f:
            self.labels = [line.strip() for line in f.readlines()]

        # Have to do a weird fix for label map if using the COCO "starter model" from
        # https://www.tensorflow.org/lite/models/object_detection/overview
        # First label is '???', which has to be removed.
        if self.labels[0] == '???':
            del (self.labels[0])

        # Load the Tensorflow Lite model.
        self.interpreter = Interpreter(model_path=self.PATH_TO_CKPT)
        self.interpreter.allocate_tensors()

        # Get model details
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        self.height = self.input_details[0]['shape'][1]
        self.width = self.input_details[0]['shape'][2]
Example No. 8
def main():
#    parser = ArgumentParser(description='Demo Camera')
#    parser.add_argument('-t','--threshold', type=float,help='threshold for classification')
#    parser.add_argument('-m','--model_path', type=str,help='threshold for classification')
#    args = parser.parse_args()
#    
#    threshold = args.threshold
#    model_path = args.model_path

    interpreter = Interpreter(model_path="./detect/detect.tflite")
    interpreter.allocate_tensors()
    
    _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']

    labels = load_labels("./detect/labelmap.txt")
    print("n-class : ",len(labels))
    print(labels)
    offset_label = 1
    
    cap = cv2.VideoCapture(0)
    while cap.isOpened():

        ret,image = cap.read()
        img = cv2.resize(image,(input_width, input_height))
        
        results = detect_objects(interpreter,img,0.4)
        
        for res in results:
            box = res["bounding_box"]
            class_id = res["class_id"]
            tl,br = convert_boxs(box,size=(CAMERA_WIDTH,CAMERA_HEIGHT))
            cv2.rectangle(image,tl,br,(0,255,0),2)
            cv2.putText(image, labels[int(class_id) + offset_label], tl, 0, 1, (0, 255, 255), 2)
    
        cv2.imshow("",image)
        if cv2.waitKey(22) == ord('q'):
            break
    cv2.destroyAllWindows()
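convert_boxs() is used above but not defined. A minimal sketch, assuming each box is [ymin, xmin, ymax, xmax] in normalized coordinates, which is the usual output layout of SSD-style TFLite detectors.

def convert_boxs(box, size):
    """Convert a normalized box to pixel top-left / bottom-right corner tuples."""
    width, height = size
    ymin, xmin, ymax, xmax = box
    tl = (int(xmin * width), int(ymin * height))
    br = (int(xmax * width), int(ymax * height))
    return tl, br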
Example No. 9
    def __init__(self, config):
        self.config = config
        # Get the model name from the config
        self.model_name = self.config.get_section_dict('Detector')['Name']
        # Frames Per Second
        self.fps = None
        self.model_file = 'ped_ssd_mobilenet_v2_quantized_edgetpu.tflite'
        self.model_path = '/repo/data/edgetpu/' + self.model_file

        # Get the model .tflite file path from the config.
        # If there is no .tflite file in the path it will be downloaded automatically from base_url
        user_model_path = self.config.get_section_dict('Detector')['ModelPath']
        if len(user_model_path) > 0:
            print('using %s as model' % user_model_path)
            self.model_path = user_model_path
        else:
            base_url = 'https://media.githubusercontent.com/media/neuralet/neuralet-models/master/edge-tpu/'
            url = base_url + self.model_name + '/' + self.model_file

            if not os.path.isfile(self.model_path):
                print('model does not exist under: ', self.model_path,
                      'downloading from ', url)
                wget.download(url, self.model_path)

        # Load TFLite model and allocate tensors
        self.interpreter = Interpreter(
            self.model_path,
            experimental_delegates=[load_delegate("libedgetpu.so.1")])
        self.interpreter.allocate_tensors()
        # Get the model input and output tensor details
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

        # Get class id from config
        self.class_id = int(
            self.config.get_section_dict('Detector')['ClassID'])
        self.score_threshold = float(
            self.config.get_section_dict('Detector')['MinScore'])
Example No. 10
def main():

    interpreter = Interpreter(model_path, experimental_delegates=[load_delegate("libedgetpu.so.1")])

    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    input_queue = input_manager.get_input_queue()
    output_queue = output_manager.get_output_queue()

    print('------------------------------------------------------------------------------------------')
    print('Started Inference server, waiting for incoming requests ... (send \'stop\' to kill server)')
    print('------------------------------------------------------------------------------------------')

    while True:
        data = input_queue.get()
        print('received data with type ', type(data))

        if isinstance(data, str):
            if data == "stop":
                break
            print('data is: ', data)

        if isinstance(data, list):
            data = np.array(data, dtype=np.uint8)
            input_image = np.expand_dims(data, axis=0)
            interpreter.set_tensor(input_details[0]["index"], input_image)
            t_begin = time.perf_counter()
            interpreter.invoke()
            inference_time = time.perf_counter() - t_begin
            net_output = interpreter.get_tensor(output_details[0]["index"])
            print('inference output: ', net_output , ', done in ', inference_time, ' seconds' )
            output_queue.put(net_output)

    # End while

    input_manager.shutdown()
    output_manager.shutdown()
Example No. 11
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    args = parser.parse_args()

    labels = load_labels(args.labels)

    interpreter = Interpreter(args.model)
    interpreter.allocate_tensors()
    _, height, width, _ = interpreter.get_input_details()[0]['shape']

    with picamera.PiCamera(resolution=(320, 240), framerate=30) as camera:
        camera.start_preview()
        try:
            stream = io.BytesIO()
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (width, height), Image.ANTIALIAS)
                start_time = time.time()
                results = classify_image(interpreter, image)
                logging.info(results)
                elapsed_ms = (time.time() - start_time) * 1000
                label_id, prob = results[0]
                stream.seek(0)
                stream.truncate()
                camera.annotate_text = '%s %.2f\n%.1fms' % (labels[label_id],
                                                            prob, elapsed_ms)
        finally:
            camera.stop_preview()
Example No. 12
def redwhitepredict():
    # user input
    parser = reqparse.RequestParser()
    parser.add_argument('characteristics', type=str, required=True, help="This is expecting a selection of wine characteristics", action='append')
    args = parser.parse_args()
    characteristics = args['characteristics'][0].split(' ')

    #  Load the model
    
    interpreter = Interpreter(model_path="Red_and_White_Analysis/redorwhite_model_trained.tflite")
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Test the model on random input data.
    input_shape = input_details[0]['shape']
    user_input_characteristics=[float(i) for i in characteristics]
    print(user_input_characteristics)

    input_data = np.array([user_input_characteristics], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)

    interpreter.invoke()

    # The function `get_tensor()` returns a copy of the tensor data.
    # Use `tensor()` in order to get a pointer to the tensor.
    output_data = interpreter.get_tensor(output_details[0]['index'])
    print(output_data)

    #redorwhite_model = load_model('Red_and_White_Analysis/redorwhite_model_trained.h5')

    # #predict the wine class based on model and save output to 'out'
    # user_input_characteristics=[float(i) for i in characteristics]

    #run model with user input
    #result_characteristics = redorwhite_model.predict_classes([user_input_characteristics])
    result_characteristics = "White" if output_data[0][0] ==0 else "Red"
    return jsonify({'wine_selection': result_characteristics})
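A hypothetical way to exercise this endpoint; the URL path, port and feature values are guesses, since the Flask routing and the model's expected feature order are not part of the snippet.

import requests

payload = {'characteristics': '7.4 0.7 0.0 1.9 0.076 11.0 34.0 0.9978 3.51 0.56 9.4'}
resp = requests.post('http://localhost:5000/redwhitepredict', data=payload)
print(resp.json())  # e.g. {'wine_selection': 'Red'}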
Example No. 13
	def image_classification_render(self):
		#model configuration
		parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
		parser.add_argument('--model', help='File path of .tflite file.', required=True)
		parser.add_argument('--labels', help='File path of labels file.', required=True)
		args = parser.parse_args()
		labels = load_labels(args.labels)
		interpreter = Interpreter(args.model)
		interpreter.allocate_tensors()
		_, height, width, _ = interpreter.get_input_details()[0]['shape']
		
		#image request
		self.image_request()
		
		#image classification
		final_image = Image.open(final).convert('RGB').resize((width, height),Image.ANTIALIAS)
		start_time = time.time()
		results = classify_image(interpreter, final_image)
		elapsed_ms = (time.time() - start_time) * 1000
		label_id, prob = results[0]
		# convert the PIL image to a NumPy array (assumes numpy imported as np) so cv2.putText can draw on it
		final_image = np.array(final_image)
		cv2.putText(final_image, str(labels[label_id]) + ' ' + str(prob) + ' ' + str(elapsed_ms), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (250, 25, 250), 2)
		self.video.release()
		return final_image
Example No. 14
def main(base_path):
    model_path = os.path.join(base_path, 'model.tflite')
    label_path = os.path.join(base_path, 'labels.txt')
    threshold = 0.4

    labels = load_labels(label_path)
    interpreter = Interpreter(model_path)
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    camera = cv2.VideoCapture(0)

    while True:
        s, image = camera.read()
        if s:  # frame captured without any errors
            image = Image.fromarray(image).convert('RGB').resize(
                (input_width, input_height), Image.ANTIALIAS)

            results = detect_objects(interpreter, image, threshold)

            if len(results) > 0:
                print(labels[results[0]['class_id']])
Example No. 15
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--model', help='File path of .tflite file.', required=True)
    parser.add_argument(
        '--labels', help='File path of labels file.', required=True)
    parser.add_argument(
        '--image', help='File path of image file.', required=True)
    args = parser.parse_args()

    labels = load_labels(args.labels)

    interpreter = Interpreter(args.model)
    interpreter.allocate_tensors()
    _, height, width, _ = interpreter.get_input_details()[0]['shape']
    print("%sx%s" % (height, width))
    image = Image.open(args.image).convert('RGB').resize((width, height),
                                                     Image.ANTIALIAS)
    results = classify_image(interpreter, image)
    for label_id, prob in results:
        annotate_text = '%s %.2f' % (labels[label_id], prob)
        print(annotate_text)
Example No. 16
def compute(model_path):
    now = time.monotonic()
    intp = Interpreter(model_path)
    x = intp.tensor(intp.get_input_details()[0]['index'])
    iy = intp.get_output_details()[0]['index']
    intp.allocate_tensors()
    t1 = time.monotonic() - now
    now = time.monotonic()
    
    for i in range(WARMUP):
        # randomize the input tensor in place before each invoke
        x()[:] = np.random.rand(*x().shape)
        intp.invoke()
        y = intp.get_tensor(iy)
    t2 = time.monotonic() - now
    now = time.monotonic()
    for i in range(ITER):
        # randomize the input tensor in place before each invoke
        x()[:] = np.random.rand(*x().shape)
        intp.invoke()
        y = intp.get_tensor(iy)
    t3 = time.monotonic() - now
    return t1, t2/float(WARMUP), t3/float(ITER)
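A small driver for compute(); WARMUP and ITER are module-level constants that are not shown in the snippet, so the values and model path below are placeholders.

WARMUP = 10
ITER = 100

if __name__ == '__main__':
    t_init, t_warm, t_iter = compute('mobilenet_v1_1.0_224_quant.tflite')
    print('init: %.3f s, warm-up: %.4f s/run, steady state: %.4f s/run' % (t_init, t_warm, t_iter))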
Example No. 17
def apply(tflite_path, in0, in1, in2):
    interpreter = Interpreter(model_path=tflite_path)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    input_tensor0 = input_details[0]['index']
    input_tensor1 = input_details[1]['index']
    input_tensor2 = input_details[2]['index']
    output_tensors = [tensor['index'] for tensor in output_details]

    res1 = []
    res2 = []
    res3 = []
    for line0, line1, line2 in zip(in0.values, in1.values, in2.values):
        line_res1 = []
        line_res2 = []
        line_res3 = []
        interpreter.set_tensor(
            input_tensor0,
            np.array([line0]).astype(input_details[0]['dtype']))
        interpreter.set_tensor(
            input_tensor1,
            np.array([line1]).astype(input_details[1]['dtype']))
        interpreter.set_tensor(
            input_tensor2,
            np.array([line2]).astype(input_details[2]['dtype']))
        interpreter.invoke()
        for t in output_tensors:
            a = interpreter.get_tensor(t)
            line_res1.append(a[0][0])
            line_res2.append(a[0][1])
            line_res3.append(a[0][2])
        res1.append(np.median(line_res1))
        res2.append(np.median(line_res2))
        res3.append(np.median(line_res3))
    return res1, res2, res3
Example No. 18
def main():
    MODEL_PATH = BASE_DIR + '/pretrained_models/detect.tflite'
    LABEL_PATH = BASE_DIR + '/pretrained_models/coco_labels.txt'
    threshold = 0.4

    labels = load_labels(LABEL_PATH)
    interpreter = Interpreter(MODEL_PATH)
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']
    input_size = (input_width, input_height)

    camera = cv2.VideoCapture(index=0)
    camera.set(cv2.CAP_PROP_FPS, 3)
    annotator = Annotator(img_size=(640, 480))

    while (True):
        ret, in_img = camera.read()
        in_img = imutils.rotate_bound(in_img, angle=180)
        img = cv2.resize(in_img,
                         dsize=input_size,
                         interpolation=cv2.INTER_NEAREST)

        start_time = time.monotonic()
        results = detect_objects(interpreter, img, threshold)
        elapsed_ms = (time.monotonic() - start_time) * 1000

        annotator.clear()
        annotate_objects(annotator, results, labels)
        annotator.text([5, 0], '%.1fms' % (elapsed_ms))
        annotator.update(in_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    camera.release()
    cv2.destroyAllWindows()
Example No. 19
def hair_pred(path, color):
    img = cv2.imread(path)
    deep_model = 'bisenetv2_celebamaskhq_448x448_float16_quant.tflite'
    num_threads = 4
    interpreter = Interpreter(model_path=deep_model, num_threads=num_threads)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()[0]['index']
    bisenetv2_predictions = interpreter.get_output_details()[0]['index']
    model_height = interpreter.get_input_details()[0]['shape'][1]
    model_width = interpreter.get_input_details()[0]['shape'][2]
    height, width, channels = img.shape

    prepimg_deep_size = cv2.resize(img, (model_width, model_height))
    prepimg_deep = cv2.cvtColor(prepimg_deep_size, cv2.COLOR_BGR2RGB)
    prepimg_deep = np.expand_dims(prepimg_deep, axis=0)
    prepimg_deep = prepimg_deep.astype(np.float32)
    prepimg_deep /= 255.0
    prepimg_deep -= [[[0.5, 0.5, 0.5]]]
    prepimg_deep /= [[[0.5, 0.5, 0.5]]]

    # Run model

    interpreter.set_tensor(input_details, prepimg_deep)
    interpreter.invoke()

    # Get results
    predictions = interpreter.get_tensor(bisenetv2_predictions)
    table = {'hair': 17, 'upper_lip': 12, 'lower_lip': 13}

    part = table['hair']
    image = hair(prepimg_deep_size, predictions, part, color)
    imdraw = cv2.resize(image, (width, height))
    image = Image.fromarray(cv2.cvtColor(imdraw, cv2.COLOR_BGR2RGB))
    bio = BytesIO()
    bio.name = 'hair_seg.jpg'
    image.save(bio, 'JPEG')
    return bio
Example No. 20
    def __init__(self, config):
        self.config = config
        self.model_name = "OFMClassifier_edgetpu.tflite"
        if os.path.isfile(config.CLASSIFIER_MODEL_PATH):
            self.model_path = config.CLASSIFIER_MODEL_PATH
        else:
            self.model_path = 'data/classifiers/edgetpu/'
            if not os.path.isdir(self.model_path):
                os.makedirs(self.model_path)
            self.model_path = self.model_path + self.model_name

        # Frames Per Second
        self.fps = None
        if not os.path.isfile(self.model_path):
            url = "https://raw.githubusercontent.com/neuralet/neuralet-models/master/edge-tpu/OFMClassifier/OFMClassifier_edgetpu.tflite"
            print("model does not exist under: ", self.model_path, "downloading from ", url)
            wget.download(url, self.model_path)

        # Load TFLite model and allocate tensors
        self.interpreter = Interpreter(self.model_path, experimental_delegates=[load_delegate("libedgetpu.so.1")])
        self.interpreter.allocate_tensors()
        # Get the model input and output tensor details
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
Example No. 21
    def __init__(self, model_name: str) -> None:
        """Initialize a MoveNet pose estimation model.

    Args:
      model_name: Name of the TFLite MoveNet model.
    """

        # Append TFLITE extension to model_name if there's no extension
        _, ext = os.path.splitext(model_name)
        if not ext:
            model_name += '.tflite'

        # Initialize model
        interpreter = Interpreter(model_path=model_name, num_threads=4)
        interpreter.allocate_tensors()

        self._input_index = interpreter.get_input_details()[0]['index']
        self._output_index = interpreter.get_output_details()[0]['index']

        self._input_height = interpreter.get_input_details()[0]['shape'][1]
        self._input_width = interpreter.get_input_details()[0]['shape'][2]

        self._interpreter = interpreter
        self._crop_region = None
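A hypothetical helper showing how the handles stored above could drive a single inference; single-pose MoveNet models return a [1, 1, 17, 3] tensor of (y, x, score) keypoints, which is assumed here, and numpy is assumed to be imported as np in the module.

    def _run_once(self, input_image):
        # input_image: HxWx3 array already resized to (self._input_height, self._input_width)
        # and cast to the dtype the model expects (uint8 for the quantized variants)
        self._interpreter.set_tensor(self._input_index,
                                     np.expand_dims(input_image, axis=0))
        self._interpreter.invoke()
        keypoints_with_scores = self._interpreter.get_tensor(self._output_index)
        return np.squeeze(keypoints_with_scores)  # shape (17, 3): y, x, score per keypoint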
Example No. 22
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
    labels = [line.strip() for line in f.readlines()]

# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
    del (labels[0])

# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
    interpreter = Interpreter(
        model_path=PATH_TO_CKPT,
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    print(PATH_TO_CKPT)
else:
    interpreter = Interpreter(model_path=PATH_TO_CKPT)

interpreter.allocate_tensors()

# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]

floating_model = (input_details[0]['dtype'] == np.float32)
Example No. 23
def main():
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
      '--model', help='File path of .tflite file.', required=True)
  parser.add_argument(
      '--labels', help='File path of labels file.', required=True)
  parser.add_argument(
      '--threshold',
      help='Score threshold for detected objects.',
      required=False,
      type=float,
      default=0.4)
  args = parser.parse_args()

  labels = load_labels(args.labels)
  interpreter = Interpreter(args.model)
  interpreter.allocate_tensors()
  _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']

  with picamera.PiCamera(
      resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=30) as camera:
    camera.start_preview()
    try:
      stream = io.BytesIO()
      annotator = Annotator(camera)
      for _ in camera.capture_continuous(
          stream, format='jpeg', use_video_port=True):
        stream.seek(0)
        image = Image.open(stream).convert('RGB').resize(
            (input_width, input_height), Image.ANTIALIAS)
        start_time = time.monotonic()
        results = detect_objects(interpreter, image, args.threshold)
        elapsed_ms = (time.monotonic() - start_time) * 1000
        annotator.clear()
        annotate_objects(annotator, results, labels)
        
        try:
            print(results[0]['class_id'])
            if results[0]['class_id'] in [46.0, 16.0, 17.0]:
                if results[0]['class_id'] == 46.0:
                    print("Cup!!!!")
                    blink.found_cup()
                    
                if results[0]['class_id'] == 16.0:
                    print("Cat !!!!")
                    blink.found_cat()

                if results[0]['class_id'] == 17.0:
                    print("Dog !!!!")
                    blink.found_dog()

        except IndexError:  # no detections in this frame
          print("")

        annotator.text([5, 0], '%.1fms' % (elapsed_ms))
        annotator.update()

        stream.seek(0)
        stream.truncate()

    finally:
      camera.stop_preview()
Example No. 24
PATH_TO_LABELS = '../data/Sample_TFLite_model/labelmap.txt'

# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
    labels = [line.strip() for line in f.readlines()]

# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
    del (labels[0])
print('setup path name and label map ok')

#%%
# Load the Tensorflow Lite model.
interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]

floating_model = (input_details[0]['dtype'] == np.float32)

input_mean = 127.5
input_std = 127.5

print(f'{width},{height}')

# %%
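A hedged continuation of the cell above, showing how these variables are typically used for one detection pass; the image path is a placeholder and the output tensor order matches the other SSD examples in this collection.

import cv2
import numpy as np

frame = cv2.imread('test.jpg')
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
input_data = np.expand_dims(cv2.resize(frame_rgb, (width, height)), axis=0)

# Normalize pixel values only for a floating (non-quantized) model
if floating_model:
    input_data = (np.float32(input_data) - input_mean) / input_std

interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
boxes = interpreter.get_tensor(output_details[0]['index'])[0]
classes = interpreter.get_tensor(output_details[1]['index'])[0]
scores = interpreter.get_tensor(output_details[2]['index'])[0]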
Example No. 25
import Proximity_Sensor
from tflite_runtime.interpreter import Interpreter
import numpy as np

# Create objects
adc = ADS1015.ADS1015()
proximity = Proximity_Sensor.VCNL4010()

Min_Force = 15500
Max_Force = 28000

Min_Proximity = 2100
Max_Proximity = 16000

# Instantiate TF Lite Model
interpreter = Interpreter(model_path = "my_tflite_model_4.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Create Empty Data Storage
Raw_Data = []
Predictions = []

while True:

    # Scale 0-1 according to projected min and max values
    if adc.read_adc(0) > Max_Force:
        Force = 1
    elif adc.read_adc(0) < Min_Force:
        Force = 0
Example No. 26
    parser.add_argument("--camera_height",
                        type=int,
                        default=480,
                        help="height.")
    parser.add_argument("--vidfps", type=int, default=30, help="Frame rate.")
    parser.add_argument("--num_threads", type=int, default=4, help="Threads.")
    args = parser.parse_args()

    model = args.model
    usbcamno = args.usbcamno
    image_width = args.camera_width
    image_height = args.camera_height
    vidfps = args.vidfps
    num_threads = args.num_threads

    interpreter = Interpreter(model_path=model)
    try:
        interpreter.set_num_threads(num_threads)
    except:
        print(
            "WARNING: The installed PythonAPI of Tensorflow/Tensorflow Lite runtime does not support Multi-Thread processing."
        )
        print("WARNING: It works in single thread mode.")
        print(
            "WARNING: If you want to use Multi-Thread to improve performance on aarch64/armv7l platforms, please refer to one of the below to implement a customized Tensorflow/Tensorflow Lite runtime."
        )
        print("https://github.com/PINTO0309/Tensorflow-bin.git")
        print("https://github.com/PINTO0309/TensorflowLite-bin.git")
        pass
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
Example No. 27
    def init_model(self):
        print("[!] Loading {} ..".format(str(self.args["model"])))
        self.interpreter = Interpreter(self.args["model"])
        print("[*] Done")
        self.interpreter.allocate_tensors()
        _, height, width, _ = self.interpreter.get_input_details()[0]['shape']
Example No. 28
def object_detection(MODEL_NAME, GRAPH_NAME, LABELMAP_NAME,
                     min_conf_threshold):
    # Grab global references
    global videostream, outputFrame, lock, imW, imH

    # MODEL_NAME = args.modeldir
    # GRAPH_NAME = args.graph
    # LABELMAP_NAME = args.labels

    # min_conf_threshold = args.threshold

    # resW, resH = args.resolution.split('x')
    # resW, resH = resolution.split('x')
    # imW, imH = int(resW), int(resH)

    # Get path to current working directory
    CWD_PATH = os.getcwd()

    # Path to .tflite file, which contains the model that is used for object detection
    PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, GRAPH_NAME)

    # Path to label map file
    PATH_TO_LABELS = os.path.join(CWD_PATH, MODEL_NAME, LABELMAP_NAME)

    # Load the label map
    with open(PATH_TO_LABELS, 'r') as f:
        labels = [line.strip() for line in f.readlines()]

    # If using the COCO "starter model" from https://www.tensorflow.org/lite/models/object_detection/overview
    # Have to remove '???' label
    if labels[0] == '???':
        del (labels[0])

    # Load the Tensorflow Lite model and get details
    interpreter = Interpreter(model_path=PATH_TO_CKPT)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]

    floating_model = (input_details[0]['dtype'] == np.float32)

    input_mean = 127.5
    input_std = 127.5

    # Initialize frame rate calculation
    frame_rate_calc = 1
    freq = cv2.getTickFrequency()

    # Loop through frames from the video stream
    while True:
        # Start timer (for calculating frame rate)
        t1 = cv2.getTickCount()

        # Grab frame from video stream
        frame1 = videostream.read()

        # Acquire frame and resize to expected shape [1xHxWx3]
        # frame = frame1.copy()
        frame = cv2.flip(frame1, -1)
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb, (width, height))
        input_data = np.expand_dims(frame_resized, axis=0)

        # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
        if floating_model:
            input_data = (np.float32(input_data) - input_mean) / input_std

        # Perform the actual detection by running the model with the image as input
        interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()

        # Retrieve detection results
        boxes = interpreter.get_tensor(output_details[0]['index'])[
            0]  # Bounding box coordinates of detected objects
        classes = interpreter.get_tensor(
            output_details[1]['index'])[0]  # Class index of detected objects
        scores = interpreter.get_tensor(
            output_details[2]['index'])[0]  # Confidence of detected objects
        #num = interpreter.get_tensor(output_details[3]['index'])[0]  # Total number of detected objects (inaccurate and not needed)

        # Loop over all detections and draw detection box if confidence is above minimum threshold
        for i in range(len(scores)):
            if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
                # Get bounding box coordinates and draw box
                # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
                ymin = int(max(1, (boxes[i][0] * imH)))
                xmin = int(max(1, (boxes[i][1] * imW)))
                ymax = int(min(imH, (boxes[i][2] * imH)))
                xmax = int(min(imW, (boxes[i][3] * imW)))

                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (10, 255, 0),
                              2)

                # Draw label
                object_name = labels[int(
                    classes[i]
                )]  # Look up object name from "labels" array using class index
                label = '%s: %d%%' % (object_name, int(scores[i] * 100)
                                      )  # Example: 'person: 72%'
                labelSize, baseLine = cv2.getTextSize(label,
                                                      cv2.FONT_HERSHEY_SIMPLEX,
                                                      0.7, 2)  # Get font size
                label_ymin = max(
                    ymin, labelSize[1] + 10
                )  # Make sure not to draw label too close to top of window
                cv2.rectangle(
                    frame, (xmin, label_ymin - labelSize[1] - 10),
                    (xmin + labelSize[0], label_ymin + baseLine - 10),
                    (255, 255, 255),
                    cv2.FILLED)  # Draw white box to put label text in
                cv2.putText(frame, label, (xmin, label_ymin - 7),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0),
                            2)  # Draw label text

        # Draw framerate in corner of frame
        cv2.putText(frame, 'FPS: {0:.2f}'.format(frame_rate_calc), (30, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)

        # All the results have been drawn on the frame, so it's time to display it.
        # cv2.imshow('Object detector', frame)

        # Acquire the lock, set the output frame, and release the lock
        with lock:
            outputFrame = frame.copy()

        # Calculate framerate
        t2 = cv2.getTickCount()
        time1 = (t2 - t1) / freq
        frame_rate_calc = 1 / time1
Example No. 29
    def __init__(
        self,
        model_path: str,
        options: ObjectDetectorOptions = ObjectDetectorOptions()
    ) -> None:
        """Initialize a TFLite object detection model.

    Args:
        model_path: Path to the TFLite model.
        options: The config to initialize an object detector. (Optional)

    Raises:
        ValueError: If the TFLite model is invalid.
        OSError: If the current OS isn't supported by EdgeTPU.
    """

        # Load metadata from model.
        displayer = metadata.MetadataDisplayer.with_model_file(model_path)

        # Save model metadata for preprocessing later.
        model_metadata = json.loads(displayer.get_metadata_json())
        process_units = model_metadata['subgraph_metadata'][0][
            'input_tensor_metadata'][0]['process_units']
        mean = 127.5
        std = 127.5
        for option in process_units:
            if option['options_type'] == 'NormalizationOptions':
                mean = option['options']['mean'][0]
                std = option['options']['std'][0]
        self._mean = mean
        self._std = std

        # Load label list from metadata.
        file_name = displayer.get_packed_associated_file_list()[0]
        label_map_file = displayer.get_associated_file_buffer(
            file_name).decode()
        label_list = list(filter(len, label_map_file.splitlines()))
        self._label_list = label_list

        # Initialize TFLite model.
        if options.enable_edgetpu:
            if edgetpu_lib_name() is None:
                raise OSError(
                    "The current OS isn't supported by Coral EdgeTPU.")
            interpreter = Interpreter(
                model_path=model_path,
                experimental_delegates=[load_delegate(edgetpu_lib_name())],
                num_threads=options.num_threads)
        else:
            interpreter = Interpreter(model_path=model_path,
                                      num_threads=options.num_threads)

        interpreter.allocate_tensors()
        input_detail = interpreter.get_input_details()[0]

        # From TensorFlow 2.6, the order of the outputs become undefined.
        # Therefore we need to sort the tensor indices of TFLite outputs and to know
        # exactly the meaning of each output tensor. For example, if
        # output indices are [601, 599, 598, 600], tensor names and indices aligned
        # are:
        #   - location: 598
        #   - category: 599
        #   - score: 600
        #   - detection_count: 601
        # because of the op's ports of TFLITE_DETECTION_POST_PROCESS
        # (https://github.com/tensorflow/tensorflow/blob/a4fe268ea084e7d323133ed7b986e0ae259a2bc7/tensorflow/lite/kernels/detection_postprocess.cc#L47-L50).
        sorted_output_indices = sorted(
            [output['index'] for output in interpreter.get_output_details()])
        self._output_indices = {
            self._OUTPUT_LOCATION_NAME: sorted_output_indices[0],
            self._OUTPUT_CATEGORY_NAME: sorted_output_indices[1],
            self._OUTPUT_SCORE_NAME: sorted_output_indices[2],
            self._OUTPUT_NUMBER_NAME: sorted_output_indices[3],
        }

        self._input_size = input_detail['shape'][2], input_detail['shape'][1]
        self._is_quantized_input = input_detail['dtype'] == np.uint8
        self._interpreter = interpreter
        self._options = options
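A hypothetical companion method showing how the sorted output indices saved above would be read back after interpreter.invoke(); preprocessing and result wrapping are omitted.

    def _raw_outputs(self):
        """Return the four raw SSD outputs using the sorted tensor indices."""
        boxes = self._interpreter.get_tensor(
            self._output_indices[self._OUTPUT_LOCATION_NAME])[0]
        classes = self._interpreter.get_tensor(
            self._output_indices[self._OUTPUT_CATEGORY_NAME])[0]
        scores = self._interpreter.get_tensor(
            self._output_indices[self._OUTPUT_SCORE_NAME])[0]
        count = int(self._interpreter.get_tensor(
            self._output_indices[self._OUTPUT_NUMBER_NAME])[0])
        return boxes, classes, scores, count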
Example No. 30
    def __init__(self,
                 weights='yolov5s.pt',
                 device=torch.device('cpu'),
                 dnn=False,
                 data=None,
                 fp16=False):
        # Usage:
        #   PyTorch:              weights = *.pt
        #   TorchScript:                    *.torchscript
        #   ONNX Runtime:                   *.onnx
        #   ONNX OpenCV DNN:                *.onnx with --dnn
        #   OpenVINO:                       *.xml
        #   CoreML:                         *.mlmodel
        #   TensorRT:                       *.engine
        #   TensorFlow SavedModel:          *_saved_model
        #   TensorFlow GraphDef:            *.pb
        #   TensorFlow Lite:                *.tflite
        #   TensorFlow Edge TPU:            *_edgetpu.tflite
        from models.experimental import attempt_download, attempt_load  # scoped to avoid circular import

        super().__init__()
        w = str(weights[0] if isinstance(weights, list) else weights)
        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(
            w)  # get backend
        w = attempt_download(w)  # download if not local
        fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu'  # FP16
        stride, names = 32, [f'class{i}'
                             for i in range(1000)]  # assign defaults
        if data:  # assign class names (optional)
            with open(data, errors='ignore') as f:
                names = yaml.safe_load(f)['names']

        if pt:  # PyTorch
            model = attempt_load(weights if isinstance(weights, list) else w,
                                 device=device)
            stride = max(int(model.stride.max()), 32)  # model stride
            names = model.module.names if hasattr(
                model, 'module') else model.names  # get class names
            model.half() if fp16 else model.float()
            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
        elif jit:  # TorchScript
            LOGGER.info(f'Loading {w} for TorchScript inference...')
            extra_files = {'config.txt': ''}  # model metadata
            model = torch.jit.load(w, _extra_files=extra_files)
            model.half() if fp16 else model.float()
            if extra_files['config.txt']:
                d = json.loads(extra_files['config.txt'])  # extra_files dict
                stride, names = int(d['stride']), d['names']
        elif dnn:  # ONNX OpenCV DNN
            LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
            check_requirements(('opencv-python>=4.5.4', ))
            net = cv2.dnn.readNetFromONNX(w)
        elif onnx:  # ONNX Runtime
            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
            cuda = torch.cuda.is_available()
            check_requirements(
                ('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
            import onnxruntime
            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'
                         ] if cuda else ['CPUExecutionProvider']
            session = onnxruntime.InferenceSession(w, providers=providers)
            meta = session.get_modelmeta().custom_metadata_map  # metadata
            if 'stride' in meta:
                stride, names = int(meta['stride']), eval(meta['names'])
        elif xml:  # OpenVINO
            LOGGER.info(f'Loading {w} for OpenVINO inference...')
            check_requirements(
                ('openvino', )
            )  # requires openvino-dev: https://pypi.org/project/openvino-dev/
            from openvino.runtime import Core, Layout, get_batch
            ie = Core()
            if not Path(w).is_file():  # if not *.xml
                w = next(Path(w).glob(
                    '*.xml'))  # get *.xml file from *_openvino_model dir
            network = ie.read_model(model=w,
                                    weights=Path(w).with_suffix('.bin'))
            if network.get_parameters()[0].get_layout().empty:
                network.get_parameters()[0].set_layout(Layout("NCHW"))
            batch_dim = get_batch(network)
            if batch_dim.is_static:
                batch_size = batch_dim.get_length()
            executable_network = ie.compile_model(
                network,
                device_name="CPU")  # device_name="MYRIAD" for Intel NCS2
            output_layer = next(iter(executable_network.outputs))
            meta = Path(w).with_suffix('.yaml')
            if meta.exists():
                stride, names = self._load_metadata(meta)  # load metadata
        elif engine:  # TensorRT
            LOGGER.info(f'Loading {w} for TensorRT inference...')
            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
            check_version(trt.__version__, '7.0.0',
                          hard=True)  # require tensorrt>=7.0.0
            Binding = namedtuple('Binding',
                                 ('name', 'dtype', 'shape', 'data', 'ptr'))
            logger = trt.Logger(trt.Logger.INFO)
            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
                model = runtime.deserialize_cuda_engine(f.read())
            bindings = OrderedDict()
            fp16 = False  # default updated below
            for index in range(model.num_bindings):
                name = model.get_binding_name(index)
                dtype = trt.nptype(model.get_binding_dtype(index))
                shape = tuple(model.get_binding_shape(index))
                data = torch.from_numpy(np.empty(
                    shape, dtype=np.dtype(dtype))).to(device)
                bindings[name] = Binding(name, dtype, shape, data,
                                         int(data.data_ptr()))
                if model.binding_is_input(index) and dtype == np.float16:
                    fp16 = True
            binding_addrs = OrderedDict(
                (n, d.ptr) for n, d in bindings.items())
            context = model.create_execution_context()
            batch_size = bindings['images'].shape[0]
        elif coreml:  # CoreML
            LOGGER.info(f'Loading {w} for CoreML inference...')
            import coremltools as ct
            model = ct.models.MLModel(w)
        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
            if saved_model:  # SavedModel
                LOGGER.info(
                    f'Loading {w} for TensorFlow SavedModel inference...')
                import tensorflow as tf
                keras = False  # assume TF1 saved_model
                model = tf.keras.models.load_model(
                    w) if keras else tf.saved_model.load(w)
            elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
                LOGGER.info(
                    f'Loading {w} for TensorFlow GraphDef inference...')
                import tensorflow as tf

                def wrap_frozen_graph(gd, inputs, outputs):
                    x = tf.compat.v1.wrap_function(
                        lambda: tf.compat.v1.import_graph_def(gd, name=""),
                        [])  # wrapped
                    ge = x.graph.as_graph_element
                    return x.prune(tf.nest.map_structure(ge, inputs),
                                   tf.nest.map_structure(ge, outputs))

                gd = tf.Graph().as_graph_def()  # graph_def
                with open(w, 'rb') as f:
                    gd.ParseFromString(f.read())
                frozen_func = wrap_frozen_graph(gd,
                                                inputs="x:0",
                                                outputs="Identity:0")
            elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
                try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
                    from tflite_runtime.interpreter import Interpreter, load_delegate
                except ImportError:
                    import tensorflow as tf
                    Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,
                if edgetpu:  # Edge TPU https://coral.ai/software/#edgetpu-runtime
                    LOGGER.info(
                        f'Loading {w} for TensorFlow Lite Edge TPU inference...'
                    )
                    delegate = {
                        'Linux': 'libedgetpu.so.1',
                        'Darwin': 'libedgetpu.1.dylib',
                        'Windows': 'edgetpu.dll'
                    }[platform.system()]
                    interpreter = Interpreter(
                        model_path=w,
                        experimental_delegates=[load_delegate(delegate)])
                else:  # Lite
                    LOGGER.info(
                        f'Loading {w} for TensorFlow Lite inference...')
                    interpreter = Interpreter(
                        model_path=w)  # load TFLite model
                interpreter.allocate_tensors()  # allocate
                input_details = interpreter.get_input_details()  # inputs
                output_details = interpreter.get_output_details()  # outputs
            elif tfjs:
                raise Exception(
                    'ERROR: YOLOv5 TF.js inference is not supported')
            else:
                raise Exception(f'ERROR: {w} is not a supported format')
        self.__dict__.update(locals())  # assign all variables to self
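For the TFLite / Edge TPU branch above, the matching forward pass typically quantizes the input with the tensor's (scale, zero_point) and dequantizes the output again. A simplified sketch of that pattern, not the verbatim YOLOv5 forward code:

import numpy as np

def tflite_forward(interpreter, input_details, output_details, im):
    """im: float32 NHWC array scaled to [0, 1], as prepared upstream in the pipeline."""
    detail = input_details[0]
    quantized = detail['dtype'] in (np.uint8, np.int8)
    if quantized:  # quantize the input using the tensor's scale / zero-point
        scale, zero_point = detail['quantization']
        im = (im / scale + zero_point).astype(detail['dtype'])
    interpreter.set_tensor(detail['index'], im)
    interpreter.invoke()
    y = interpreter.get_tensor(output_details[0]['index'])
    if quantized:  # dequantize the output back to float
        scale, zero_point = output_details[0]['quantization']
        y = (y.astype(np.float32) - zero_point) * scale
    return y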