# Assumes the surrounding file provides the usual imports and helpers:
# argparse, io, time, numpy as np, picamera, dlib, cv2, PIL.Image,
# tflite_runtime.interpreter (Interpreter, load_delegate), plus load_labels,
# detect_objects, get_rects, annotate_objects, Annotator, CAMERA_WIDTH and
# CAMERA_HEIGHT.
def main_dlib():
    # dlib correlation-tracker variant of the capture loop; main() below
    # uses an OpenCV MOSSE tracker instead.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    parser.add_argument('--threshold',
                        help='Score threshold for detected objects.',
                        required=False,
                        type=float,
                        default=0.4)
    args = parser.parse_args()
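
    # Typical invocation (file names are illustrative, not from the source):
    #   python3 detect_and_track.py \
    #       --model mobilenet_ssd_v2_coco_quant_edgetpu.tflite \
    #       --labels coco_labels.txt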

    labels = load_labels(args.labels)
    interpreter = Interpreter(
        args.model,
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])  # Coral Edge TPU delegate
    interpreter.allocate_tensors()
    # input tensor shape is (1, height, width, channels)
    input_shape = interpreter.get_input_details()[0]['shape']
    _, input_height, input_width, _ = input_shape

    # tracker state: t holds the active dlib correlation tracker
    counter = 0
    t = None
    test_start_time = time.monotonic()

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=10) as camera:
        camera.start_preview()

        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)

            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)

                counter += 1

                # time the JPEG decode as a rough per-frame cost check
                test_start_time = time.monotonic()
                image = Image.open(stream).convert('RGB')
                dlib_img = np.asarray(image)  # dlib tracks on a numpy RGB array
                print("Test FPS: " +
                      str(1 / (time.monotonic() - test_start_time)))

                annotator.clear()

                # for the first few frames, run the detector to acquire an
                # object; afterwards rely on the correlation tracker alone
                if counter < 10 or t is None:
                    # note: Image.ANTIALIAS was renamed Image.LANCZOS in Pillow 10
                    image = image.resize((input_width, input_height),
                                         Image.ANTIALIAS)
                    results = detect_objects(interpreter, image,
                                             args.threshold)

                    # get the coordinates for all bounding boxes within frame
                    rects = get_rects(results)

                    for i in range(len(results)):
                        # unpack the (ymin, xmin, ymax, xmax) pixel box
                        box = np.array(rects[i])
                        (startY, startX, endY, endX) = box.astype("int")
                        print(startX, startY, endX, endY)

                        # start a correlation tracker on this detection;
                        # only the last detection's tracker survives the loop
                        dlib_rect = dlib.rectangle(startX, startY, endX, endY)
                        t = dlib.correlation_tracker()
                        t.start_track(dlib_img, dlib_rect)

                    annotate_objects(annotator, results, labels)

                else:
                    # advance the tracker on the new frame and draw its box
                    t.update(dlib_img)
                    pos = t.get_position()

                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    annotator.bounding_box([startX, startY, endX, endY])

                annotator.update()

                # rewind and truncate the stream for the next capture
                stream.seek(0)
                stream.truncate()

        finally:
            camera.stop_preview()
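

# The helpers get_rects(), detect_objects(), annotate_objects(), load_labels()
# and Annotator are defined elsewhere in this file. For readers adapting this
# section standalone, a minimal sketch of get_rects() follows; it assumes
# detect_objects() returns dicts whose 'bounding_box' holds normalized
# (ymin, xmin, ymax, xmax) values, as in the TensorFlow Lite Raspberry Pi
# detection example this script builds on, scaled here to camera pixels in
# the (ymin, xmin, ymax, xmax) order the loops unpack.
def get_rects(results):
    rects = []
    for obj in results:
        ymin, xmin, ymax, xmax = obj['bounding_box']
        rects.append([int(ymin * CAMERA_HEIGHT), int(xmin * CAMERA_WIDTH),
                      int(ymax * CAMERA_HEIGHT), int(xmax * CAMERA_WIDTH)])
    return rects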


def main():
    # Same detect-then-track structure as main_dlib() above, but tracking
    # uses an OpenCV MOSSE tracker in place of the dlib correlation tracker.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    parser.add_argument('--threshold',
                        help='Score threshold for detected objects.',
                        required=False,
                        type=float,
                        default=0.4)
    args = parser.parse_args()

    labels = load_labels(args.labels)
    interpreter = Interpreter(
        args.model,
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])  # Coral Edge TPU delegate
    interpreter.allocate_tensors()
    # input tensor shape is (1, height, width, channels)
    input_shape = interpreter.get_input_details()[0]['shape']
    _, input_height, input_width, _ = input_shape

    # tracker state and per-frame timing samples
    t = None
    test_time_all = []
    counter = 0
    test_start_time = time.monotonic()

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        camera.start_preview()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)

            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):

                # accumulate per-frame times and print the running average
                test_time = time.monotonic() - test_start_time
                test_time_all.append(test_time)
                avg_time = sum(test_time_all) / len(test_time_all)
                print(str(avg_time) + ", FPS: " + str(1 / avg_time))

                stream.seek(0)

                counter += 1

                image = Image.open(stream).convert('RGB')
                cv_img = np.asarray(image)  # tracker operates on the numpy RGB frame

                annotator.clear()

                # no active tracker yet: run the detector to acquire one
                if t is None:
                    # note: Image.ANTIALIAS was renamed Image.LANCZOS in Pillow 10
                    image = image.resize((input_width, input_height),
                                         Image.ANTIALIAS)
                    results = detect_objects(interpreter, image,
                                             args.threshold)

                    rects = get_rects(results)

                    for i in range(len(results)):
                        print("new tracker")
                        # convert the (ymin, xmin, ymax, xmax) box to the
                        # (x, y, w, h) form OpenCV trackers expect
                        box = np.array(rects[i])
                        (startY, startX, endY, endX) = box.astype("int")
                        cv_rect = (startX, startY, endX - startX,
                                   endY - startY)

                        # only the last detection's tracker is kept;
                        # use cv2.legacy.TrackerMOSSE_create() on OpenCV >= 4.5.1
                        t = cv2.TrackerMOSSE_create()
                        t.init(cv_img, cv_rect)

                        annotator.bounding_box([startX, startY, endX, endY])

                else:
                    # advance the MOSSE tracker; on success, convert its
                    # (x, y, w, h) output back to corner coordinates
                    (success, box) = t.update(cv_img)

                    if success:
                        annotator.bounding_box(
                            [box[0], box[1], box[0] + box[2], box[1] + box[3]])

                annotator.update()

                stream.seek(0)
                stream.truncate()

                test_start_time = time.monotonic()  # restart the per-frame timer

        finally:
            camera.stop_preview()
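

# A standard entry point, assuming the MOSSE-based main() is the version to
# run (the original file's invocation is not shown in this section):
if __name__ == '__main__':
    main()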