    def testFloat(self):
        interpreter = interpreter_wrapper.Interpreter(
            model_path=resource_loader.get_path_to_datafile(
                'testdata/permute_float.tflite'))
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertEqual('input', input_details[0]['name'])
        self.assertEqual(np.float32, input_details[0]['dtype'])
        self.assertTrue(([1, 4] == input_details[0]['shape']).all())
        self.assertEqual((0.0, 0), input_details[0]['quantization'])
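        # The (0.0, 0) checked above is (scale, zero_point); TFLite reports
        # (0.0, 0) for tensors that are not quantized.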

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        self.assertEqual('output', output_details[0]['name'])
        self.assertEqual(np.float32, output_details[0]['dtype'])
        self.assertTrue(([1, 4] == output_details[0]['shape']).all())
        self.assertEqual((0.0, 0), output_details[0]['quantization'])

        test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
        expected_output = np.array([[4.0, 3.0, 2.0, 1.0]], dtype=np.float32)
        interpreter.set_tensor(input_details[0]['index'], test_input)
        interpreter.invoke()

        output_data = interpreter.get_tensor(output_details[0]['index'])
        self.assertTrue((expected_output == output_data).all())
Example 2
def run_tflite_graph(tflite_model_buf, input_data):
    """ Generic function to execute TFLite """
    input_data = convert_to_list(input_data)

    interpreter = interpreter_wrapper.Interpreter(
        model_content=tflite_model_buf)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # set input
    assert len(input_data) == len(input_details)
    for detail, data in zip(input_details, input_data):
        interpreter.set_tensor(detail['index'], data)

    # Run
    interpreter.invoke()

    # get output
    tflite_output = [
        interpreter.get_tensor(detail['index']) for detail in output_details
    ]

    return tflite_output
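A minimal usage sketch for run_tflite_graph; the model path and the input array below are assumptions, not part of the original example:

# Hypothetical driver: read a model buffer and run one float input through it.
with open('model.tflite', 'rb') as f:
    model_buf = f.read()
outputs = run_tflite_graph(model_buf, [np.zeros((1, 4), dtype=np.float32)])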
Example 3
    def init_interpreter(self):
        model_path = resource_loader.get_path_to_datafile(self.tflitePath)
        with io.open(model_path, 'rb') as model_file:
            data = model_file.read()

        self.interpreter = interpreter_wrapper.Interpreter(model_content=data)
        self.interpreter.allocate_tensors()

        self.input_details = self.interpreter.get_input_details()
        print(self.input_details)

        input_details = self.input_details
        self.assertEqual(1, len(input_details))
        # 'input' renamed to 'images' for this model
        self.assertEqual('images', input_details[0]['name'])
        self.assertEqual(np.uint8, input_details[0]['dtype'])
        # input shape changed to match the model
        self.assertTrue(
            ([1, 224, 224, 3] == input_details[0]['shape']).all())
        # (scale, zero_point) quantization parameters of the uint8 input
        self.assertEqual((0.012566016986966133, 131),
                         input_details[0]['quantization'])

        self.output_details = self.interpreter.get_output_details()
        print(self.output_details)

        output_details = self.output_details
        self.assertEqual(1, len(output_details))
        # output name updated to match this model
        self.assertEqual('Softmax', output_details[0]['name'])
        self.assertEqual(np.uint8, output_details[0]['dtype'])
        self.assertTrue(([1, 4] == output_details[0]['shape']).all())
        self.assertEqual((0.00390625, 0), output_details[0]['quantization'])
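The (scale, zero_point) pairs asserted above follow TFLite's affine quantization scheme, real_value = scale * (quantized_value - zero_point). A short dequantization sketch under that scheme (raw_output is an assumed uint8 array fetched from the interpreter):

# Map a quantized uint8 output back to float via its (scale, zero_point).
scale, zero_point = output_details[0]['quantization']
float_output = scale * (raw_output.astype(np.float32) - zero_point)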
Example 4
    def testInvokeBeforeReady(self):
        interpreter = interpreter_wrapper.Interpreter(
            model_path=resource_loader.get_path_to_datafile(
                'testdata/permute_float.tflite'))
        with self.assertRaisesRegex(
                RuntimeError, 'Invoke called on model that is not ready'):
            interpreter.invoke()
Example 5
    def load_model(self):
        # load the tflite model
        self.interpreter = interpreter_wrapper.Interpreter(
            model_path=self.model_path)
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
Example 6
    def testUint8(self):
        interpreter = interpreter_wrapper.Interpreter(
            resource_loader.get_path_to_datafile(
                'testdata/permute_uint8.tflite'))
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertEqual('input', input_details[0]['name'])
        self.assertEqual(np.uint8, input_details[0]['dtype'])
        self.assertTrue(([1, 4] == input_details[0]['shape']).all())

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        self.assertEqual('output', output_details[0]['name'])
        self.assertEqual(np.uint8, output_details[0]['dtype'])
        self.assertTrue(([1, 4] == output_details[0]['shape']).all())

        test_input = np.array([[1, 2, 3, 4]], dtype=np.uint8)
        expected_output = np.array([[4, 3, 2, 1]], dtype=np.uint8)
        interpreter.set_tensor(input_details[0]['index'], test_input)
        interpreter.invoke()

        output_data = interpreter.get_tensor(output_details[0]['index'])
        self.assertTrue((expected_output == output_data).all())
Example 7
    def testUint8(self):
        model_path = resource_loader.get_path_to_datafile(
            'testdata/permute_uint8.tflite')
        with io.open(model_path, 'rb') as model_file:
            data = model_file.read()

        interpreter = interpreter_wrapper.Interpreter(model_content=data)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertEqual('input', input_details[0]['name'])
        self.assertEqual(np.uint8, input_details[0]['dtype'])
        self.assertTrue(([1, 4] == input_details[0]['shape']).all())
        self.assertEqual((1.0, 0), input_details[0]['quantization'])

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        self.assertEqual('output', output_details[0]['name'])
        self.assertEqual(np.uint8, output_details[0]['dtype'])
        self.assertTrue(([1, 4] == output_details[0]['shape']).all())
        self.assertEqual((1.0, 0), output_details[0]['quantization'])

        test_input = np.array([[1, 2, 3, 4]], dtype=np.uint8)
        expected_output = np.array([[4, 3, 2, 1]], dtype=np.uint8)
        interpreter.resize_tensor_input(
            input_details[0]['index'],
            np.array(test_input.shape, dtype=np.int32))
        interpreter.allocate_tensors()
        interpreter.set_tensor(input_details[0]['index'], test_input)
        interpreter.invoke()

        output_data = interpreter.get_tensor(output_details[0]['index'])
        self.assertTrue((expected_output == output_data).all())
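Note that resize_tensor_input must be followed by another allocate_tensors() before tensors are set, exactly as the test above does. A sketch resizing the same input to a batch of two (the batch size is an assumption):

# Hypothetical: grow the input to batch size 2, reallocate, then feed it.
interpreter.resize_tensor_input(input_details[0]['index'], [2, 4])
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'], np.tile(test_input, (2, 1)))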
Example 8
    def setUp(self):
        self.interpreter = interpreter_wrapper.Interpreter(
            model_path=resource_loader.get_path_to_datafile(
                'testdata/permute_float.tflite'))
        self.interpreter.allocate_tensors()
        self.input0 = self.interpreter.get_input_details()[0]['index']
        self.initial_data = np.array([[-1., -2., -3., -4.]], np.float32)
Example 10
    def __init__(self):
        print("Creating TensorflowProcessor object")
        # load the ball-detection model
        print("Loading ball detection tflite model")
        self.ball_detector_interpreter = interpreter_wrapper.Interpreter(
            model_path=TensorflowProcessor.ball_detector_model_path)
        self.ball_detector_interpreter.allocate_tensors()
        self.ball_detector_input_details = (
            self.ball_detector_interpreter.get_input_details())
        self.ball_detector_output_details = (
            self.ball_detector_interpreter.get_output_details())

        # load the board-edge (corner) detection model
        print("Loading corner detection tflite model")
        self.corner_detector_interpreter = interpreter_wrapper.Interpreter(
            model_path=TensorflowProcessor.corner_detector_model_path)
        self.corner_detector_interpreter.allocate_tensors()
        self.corner_detector_input_details = (
            self.corner_detector_interpreter.get_input_details())
        self.corner_detector_output_details = (
            self.corner_detector_interpreter.get_output_details())

        print("TensorflowProcessor object created")
Example 11
    def load(self, model_path, inputs=None, outputs=None):
        self.sess = interpreter_wrapper.Interpreter(model_path=model_path)
        self.sess.allocate_tensors()
        self.input2index = {
            i["name"]: i["index"] for i in self.sess.get_input_details()
        }
        self.output2index = {
            i["name"]: i["index"] for i in self.sess.get_output_details()
        }
        self.inputs = list(self.input2index.keys())
        self.outputs = list(self.output2index.keys())
        return self
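A hedged sketch of one inference through the name-to-index maps built above; the object name backend and the tensor names 'input' and 'output' are assumptions:

# Hypothetical single inference keyed by tensor name.
backend.sess.set_tensor(backend.input2index['input'], data)
backend.sess.invoke()
result = backend.sess.get_tensor(backend.output2index['output'])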
Example 12
    def prepare_model(self, checkpoint):
        print(checkpoint)
        self.floating_model = False
        self.interpreter = ip.Interpreter(model_path=checkpoint)
        self.interpreter.allocate_tensors()
        input_details = self.interpreter.get_input_details()
        output_details = self.interpreter.get_output_details()
        if input_details[0]['dtype'] == np.float32:
            self.floating_model = True
        # NxHxWxC, H:1, W:2
        self.height = input_details[0]['shape'][1]
        self.width = input_details[0]['shape'][2]
        self.input_index = input_details[0]['index']
        self.output_index = output_details[0]['index']
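prepare_model caches the input geometry and tensor indices; a matching inference step might look like this sketch (the 127.5 normalization constants and the resized_image variable are assumptions):

# Hypothetical inference using the fields cached by prepare_model().
img = np.expand_dims(resized_image, axis=0)  # resized to (height, width, 3)
if self.floating_model:
    img = (np.float32(img) - 127.5) / 127.5  # assumed mean/std
self.interpreter.set_tensor(self.input_index, img)
self.interpreter.invoke()
prediction = self.interpreter.get_tensor(self.output_index)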
Example 13
def run(model_file, image_data):
    interpreter = interpreter_wrapper.Interpreter(model_path=model_file)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # set input
    interpreter.set_tensor(input_details[0]['index'], image_data)

    # Run
    interpreter.invoke()

    # get output
    tflite_output = interpreter.get_tensor(output_details[0]['index'])

    return tflite_output
Example 14
    def __init__(self, model_file, labels_path, min_score):
        # Prune detections below this score
        self.min_score = min_score

        # TF Lite model
        self.interpreter = interpreter_wrapper.Interpreter(
            model_path=model_file)
        self.interpreter.allocate_tensors()

        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

        # True when the model expects float input
        self.floating_model = self.input_details[0]['dtype'] == np.float32

        # NxHxWxC, H:1, W:2
        self.model_input_height = self.input_details[0]['shape'][1]
        self.model_input_width = self.input_details[0]['shape'][2]

        # Load label map
        self.labels = load_labels(labels_path)
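Assuming the standard TFLite SSD detection output layout (boxes, classes, scores, count), a hedged postprocessing sketch that applies the min_score threshold stored above:

# Hypothetical: keep only detections scoring at least min_score.
boxes = self.interpreter.get_tensor(self.output_details[0]['index'])[0]
classes = self.interpreter.get_tensor(self.output_details[1]['index'])[0]
scores = self.interpreter.get_tensor(self.output_details[2]['index'])[0]
kept = [(self.labels[int(c)], box, score)
        for box, c, score in zip(boxes, classes, scores)
        if score >= self.min_score]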
Example 15
import cv2
from imutils.video.pivideostream import PiVideoStream
from time import sleep
import math

from tensorflow.contrib.lite.python import interpreter as interpreter_wrapper

model_file = "/home/pi/ballance/ballance_net/ballancenet_conv_2_quant.tflite"

# One-off post-training quantization, left disabled; running it would also
# require `import tensorflow as tf`.
if False:
    converter = tf.contrib.lite.TocoConverter.from_saved_model(model_file)
    converter.post_training_quantize = True
    quant_model = converter.convert()
    open("ballancenet_conv_2_quant.tflite", "wb").write(quant_model)

interpreter = interpreter_wrapper.Interpreter(model_path=model_file)
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

print(input_details)

print("Starting processing")
videoStream = PiVideoStream(resolution=(400, 400), framerate=40).start()
sleep(1)

while True:
    frame = videoStream.read()
    cv2.imshow("frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):  # quit on 'q' so the preview loop can terminate
        break

videoStream.stop()
cv2.destroyAllWindows()
Example 16
    def testInvalidModelFile(self):
        with self.assertRaisesRegex(
                ValueError, 'Could not open \'totally_invalid_file_name\''):
            interpreter_wrapper.Interpreter(
                model_path='totally_invalid_file_name')
Example 17
def main(camera_FPS, camera_width, camera_height, inference_scale, threshold,
         num_threads):

    interpreter = None
    input_details = None
    output_details = None

    path = "pictures/"
    if not os.path.exists(path):
        os.mkdir(path)

    model_path = "OneClassAnomalyDetection-RaspberryPi3/DOC/model/"
    if os.path.exists(model_path):
        # LOF
        print("LOF model building...")
        x_train = np.loadtxt(model_path + "train.csv", delimiter=",")

        ms = MinMaxScaler()
        x_train = ms.fit_transform(x_train)

        # fit the LOF model
        clf = LocalOutlierFactor(n_neighbors=5)
        clf.fit(x_train)

        # DOC
        print("DOC Model loading...")
        interpreter = interpreter_wrapper.Interpreter(
            model_path="models/tensorflow/weights.tflite")
        interpreter.allocate_tensors()
        interpreter.set_num_threads(num_threads)
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        print("loading finish")
    else:
        print("Nothing model folder")
        sys.exit(0)

    base_range = min(camera_width, camera_height)
    stretch_ratio = inference_scale / base_range
    resize_image_width = int(camera_width * stretch_ratio)
    resize_image_height = int(camera_height * stretch_ratio)

    if base_range == camera_height:
        crop_start_x = (resize_image_width - inference_scale) // 2
        crop_start_y = 0
    else:
        crop_start_x = 0
        crop_start_y = (resize_image_height - inference_scale) // 2
    crop_end_x = crop_start_x + inference_scale
    crop_end_y = crop_start_y + inference_scale
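    # Worked example (camera size is an assumption): with a 640x480 frame and
    # inference_scale=96, stretch_ratio = 96/480 = 0.2, so the frame resizes
    # to 128x96 and is cropped horizontally from x=16 to x=112.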

    fps = ""
    message = "Push [p] to take a picture"
    result = "Push [s] to start anomaly detection"
    flag_score = False
    picture_num = 1
    elapsedTime = 0
    score = 0
    score_mean = np.zeros(10)
    mean_NO = 0

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FPS, camera_FPS)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)

    time.sleep(1)

    while cap.isOpened():
        t1 = time.time()

        ret, image = cap.read()

        if not ret:
            break

        image_copy = image.copy()

        # prediction
        if flag_score:
            prepimg = cv2.resize(image,
                                 (resize_image_width, resize_image_height))
            prepimg = prepimg[crop_start_y:crop_end_y, crop_start_x:crop_end_x]
            prepimg = np.array(prepimg).reshape(
                (1, inference_scale, inference_scale, 3))
            prepimg = prepimg / 255

            interpreter.set_tensor(input_details[0]['index'],
                                   np.array(prepimg, dtype=np.float32))
            interpreter.invoke()
            outputs = interpreter.get_tensor(output_details[0]['index'])

            outputs = outputs.reshape((len(outputs), -1))
            outputs = ms.transform(outputs)
            score = -clf._decision_function(outputs)  # larger = more anomalous

        # output score
        if not flag_score:
            cv2.putText(image, result, (camera_width - 350, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1,
                        cv2.LINE_AA)
        else:
            score_mean[mean_NO] = score[0]
            mean_NO += 1
            if mean_NO == len(score_mean):
                mean_NO = 0

            if np.mean(score_mean) > threshold:  # red if the score is high
                cv2.putText(image, "{:.1f} Score".format(np.mean(score_mean)),
                            (camera_width - 230, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1,
                            cv2.LINE_AA)
            else:  # green if the score is low
                cv2.putText(image, "{:.1f} Score".format(np.mean(score_mean)),
                            (camera_width - 230, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1,
                            cv2.LINE_AA)

        # message
        cv2.putText(image, message, (camera_width - 285, 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
        cv2.putText(image, fps, (camera_width - 164, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)

        cv2.imshow("Result", image)

        # FPS
        elapsedTime = time.time() - t1
        fps = "{:.0f} FPS".format(1 / elapsedTime)

        # quit or calculate score or take a picture
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
        if key == ord("p"):
            cv2.imwrite(path + str(picture_num) + ".jpg", image_copy)
            picture_num += 1
        if key == ord("s"):
            flag_score = True

    cv2.destroyAllWindows()
Example 18
    def testInvalidModelContent(self):
        with self.assertRaisesRegex(ValueError,
                                    'Model provided has model identifier \''):
            interpreter_wrapper.Interpreter(model_content=six.b('garbage'))
Example 19
def main(args):
    # Setup GPIO
    gpio.setmode(gpio.BCM)

    # Set up tflite interpreter
    floating_model = False
    interpreter = interpreter_wrapper.Interpreter(model_path=args.model_file)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Check the type of the input tensor
    if input_details[0]['dtype'] == np.float32:
        floating_model = True
    # NxHxWxC, H:1, W:2
    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]

    # Loop state; `camera` used below is assumed to be a PiCamera instance
    # initialized elsewhere in the original script.
    static_count = 0
    # i=0

    while True:
        stream = io.BytesIO()
        camera.capture(stream, format='jpeg')
        # "Rewind" the stream to the beginning so we can read its content
        stream.seek(0)
        image = Image.open(stream)
        image = image.resize((width, height))
        # image.save(str(i)+'image.jpeg')
        # i = i+1

        # add N dim
        input_data = np.expand_dims(image, axis=0)
        data = np.float32(input_data)
        if floating_model:
            input_data = (np.float32(input_data) -
                          args.input_mean) / args.input_std
        interpreter.set_num_threads(int(args.num_threads))
        interpreter.set_tensor(input_details[0]['index'], input_data)

        # Run inference
        start_time = time.time()
        interpreter.invoke()
        stop_time = time.time()
        print("time: ", stop_time - start_time)

        bounding_boxes = interpreter.get_tensor(output_details[0]['index'])
        bounding_boxes = np.squeeze(bounding_boxes)
        classes = interpreter.get_tensor(output_details[1]['index'])
        classes = np.squeeze(classes)
        scores = interpreter.get_tensor(output_details[2]['index'])
        scores = np.squeeze(scores)
        num_real_detections = interpreter.get_tensor(
            output_details[3]['index'])
        num_real_detections = int(np.squeeze(num_real_detections))

        image_center = np.array((0.5, 0.5))
        distances = []
        distance_vectors = []
        for i in range(num_real_detections):
            if scores[i] > 0.10:
                box_center = center(bounding_boxes[i])
                distances.append(l2_dist(box_center, image_center))
                distance_vectors.append(np.subtract(image_center, box_center))
        if len(distances) > 0:
            min_dist_idx = np.argmin(distances)
            if -0.2 < distances[min_dist_idx] < 0.2:
                static_count += 1
            if static_count >= 5:
                shoot_water()
                static_count = 0
            else:
                move_camera(distance_vectors[min_dist_idx])
        else:
            static_count = 0
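A hedged sketch of the CLI wiring main() expects; the flag names mirror the attributes the function reads (model_file, input_mean, input_std, num_threads), while the default values are assumptions:

# Hypothetical entry point for the detection loop above.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_file', required=True)
    parser.add_argument('--input_mean', type=float, default=127.5)  # assumed
    parser.add_argument('--input_std', type=float, default=127.5)  # assumed
    parser.add_argument('--num_threads', type=int, default=1)  # assumed
    main(parser.parse_args())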