Example #1
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        default="models/mobilenet_v1/detect.tflite",
                        help='File path of .tflite file.',
                        required=False)
    parser.add_argument('--labels',
                        default="models/mobilenet_v1/coco_labels.txt",
                        help='File path of labels file.',
                        required=False)
    parser.add_argument('--threshold',
                        help='Score threshold for detected objects.',
                        required=False,
                        type=float,
                        default=0.6)
    args = parser.parse_args()

    labels = load_labels(args.labels)
    interpreter = Interpreter(args.model)
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        camera.start_preview()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS).rotate(270)

                start_time = time.monotonic()
                results = detect_objects(interpreter, image, args.threshold)
                elapsed_ms = (time.monotonic() - start_time) * 1000

                annotator.clear()
                annotate_objects(annotator, results, labels)
                annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                annotator.update()

                image.paste(annotator._buffer, (0, 0), annotator._buffer)
                image = image.resize((CAMERA_HEIGHT, CAMERA_WIDTH),
                                     Image.ANTIALIAS)
                image.save("detect.png")

                stream.seek(0)
                stream.truncate()

        finally:
            camera.stop_preview()
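The examples on this page omit their imports and the helper functions they all call (load_labels, detect_objects, annotate_objects) as well as the CAMERA_WIDTH/CAMERA_HEIGHT constants. A minimal sketch of those pieces, assuming they follow the standard TensorFlow Lite Raspberry Pi object-detection sample rather than any one author's exact code, might look like this:

# Imports assumed (but not shown) by the examples on this page:
import argparse
import io
import re
import time

import numpy as np
import picamera
from PIL import Image
from tflite_runtime.interpreter import Interpreter, load_delegate
from annotation import Annotator  # Annotator class from the TFLite Raspberry Pi sample

CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480


def load_labels(path):
    """Load a label file into a {class_id: name} dict."""
    with open(path, 'r', encoding='utf-8') as f:
        labels = {}
        for row_number, line in enumerate(f.readlines()):
            pair = re.split(r'[:\s]+', line.strip(), maxsplit=1)
            if len(pair) == 2 and pair[0].strip().isdigit():
                labels[int(pair[0])] = pair[1].strip()
            else:
                labels[row_number] = pair[0].strip()
    return labels


def set_input_tensor(interpreter, image):
    """Copy the resized RGB image into the interpreter's input tensor."""
    tensor_index = interpreter.get_input_details()[0]['index']
    input_tensor = interpreter.tensor(tensor_index)()[0]
    input_tensor[:, :] = image


def get_output_tensor(interpreter, index):
    """Return the interpreter's output tensor at the given index."""
    output_details = interpreter.get_output_details()[index]
    return np.squeeze(interpreter.get_tensor(output_details['index']))


def detect_objects(interpreter, image, threshold):
    """Run one inference and keep detections scoring above the threshold."""
    set_input_tensor(interpreter, image)
    interpreter.invoke()
    boxes = get_output_tensor(interpreter, 0)
    classes = get_output_tensor(interpreter, 1)
    scores = get_output_tensor(interpreter, 2)
    count = int(get_output_tensor(interpreter, 3))
    return [{'bounding_box': boxes[i], 'class_id': classes[i], 'score': scores[i]}
            for i in range(count) if scores[i] >= threshold]


def annotate_objects(annotator, results, labels):
    """Draw each detection's bounding box and label on the camera preview."""
    for obj in results:
        ymin, xmin, ymax, xmax = obj['bounding_box']
        xmin, xmax = int(xmin * CAMERA_WIDTH), int(xmax * CAMERA_WIDTH)
        ymin, ymax = int(ymin * CAMERA_HEIGHT), int(ymax * CAMERA_HEIGHT)
        annotator.bounding_box([xmin, ymin, xmax, ymax])
        annotator.text([xmin, ymin],
                       '%s\n%.2f' % (labels[obj['class_id']], obj['score']))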
Example #2
def main():
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
      '--model', help='File path of .tflite file.', required=True)
  parser.add_argument(
      '--labels', help='File path of labels file.', required=True)
  parser.add_argument(
      '--threshold',
      help='Score threshold for detected objects.',
      required=False,
      type=float,
      default=0.4)
  args = parser.parse_args()

  labels = load_labels(args.labels)
  interpreter = Interpreter(args.model, experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
  interpreter.allocate_tensors()
  _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']

  with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=30) as camera:
    print("Start recording..")
    output = StreamingOutput()
    camera.start_preview()
    camera.start_recording(output, format='mjpeg')
    try:

      # serve_forever() blocks, so run the streaming server on a background
      # thread (assumes `import threading`); otherwise the capture loop below
      # would never execute.
      address = ('', 8000)
      server = StreamingServer(address, StreamingHandler)
      threading.Thread(target=server.serve_forever, daemon=True).start()
      stream = io.BytesIO()
      
      annotator = Annotator(camera)
      
      for _ in camera.capture_continuous(stream, format='jpeg', use_video_port=True):
        stream.seek(0)
        image = Image.open(stream).convert('RGB').resize((input_width, input_height), Image.ANTIALIAS)
        start_time = time.monotonic()
        results = detect_objects(interpreter, image, args.threshold)
        elapsed_ms = (time.monotonic() - start_time) * 1000

        annotator.clear()
        annotate_objects(annotator, results, labels)
        annotator.text([5, 0], '%.1fms' % (elapsed_ms))
        annotator.update()

        stream.seek(0)
        stream.truncate()

    finally:
      camera.stop_recording()
      camera.stop_preview()
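Example #2 also assumes a StreamingOutput class (and a StreamingServer/StreamingHandler pair) for serving the MJPEG stream. A sketch of StreamingOutput in the style of the picamera web-streaming recipe, which the original class is presumably based on, could be:

import io
from threading import Condition


class StreamingOutput(object):
    """Buffer MJPEG frames written by camera.start_recording() (sketch,
    following the picamera web-streaming recipe; the original is not shown)."""

    def __init__(self):
        self.frame = None
        self.buffer = io.BytesIO()
        self.condition = Condition()

    def write(self, buf):
        if buf.startswith(b'\xff\xd8'):
            # A new JPEG frame starts: publish the finished one and wake clients.
            self.buffer.truncate()
            with self.condition:
                self.frame = self.buffer.getvalue()
                self.condition.notify_all()
            self.buffer.seek(0)
        return self.buffer.write(buf)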
Example #3
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--model",
                        help="File path of .tflite file.",
                        required=True)
    parser.add_argument("--labels",
                        help="File path of labels file.",
                        required=True)
    parser.add_argument(
        "--threshold",
        help="Score threshold for detected objects.",
        required=False,
        type=float,
        default=0.4,
    )
    args = parser.parse_args()

    labels = load_labels(args.labels)
    interpreter = Interpreter(args.model)
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]["shape"]

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        camera.start_preview()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format="jpeg",
                                               use_video_port=True):
                stream.seek(0)
                image = (Image.open(stream).convert("RGB").resize(
                    (input_width, input_height), Image.ANTIALIAS))
                start_time = time.monotonic()
                results = detect_objects(interpreter, image, args.threshold)
                elapsed_ms = (time.monotonic() - start_time) * 1000

                annotator.clear()
                annotate_objects(annotator, results, labels)
                annotator.text([5, 0], "%.1fms" % (elapsed_ms))
                annotator.update()

                stream.seek(0)
                stream.truncate()

        finally:
            camera.stop_preview()
Example #4
def main():
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
      '--model', help='File path of .tflite file.', required=True)
  parser.add_argument(
      '--labels', help='File path of labels file.', required=True)
  parser.add_argument(
      '--threshold',
      help='Score threshold for detected objects.',
      required=False,
      type=float,
      default=0.4)
  args = parser.parse_args()

  labels = load_labels(args.labels)
  interpreter = Interpreter(args.model)
  interpreter.allocate_tensors()
  _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']

  with picamera.PiCamera(
      resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=30) as camera:
    camera.start_preview()
    try:
      stream = io.BytesIO()
      annotator = Annotator(camera)
      for _ in camera.capture_continuous(
          stream, format='jpeg', use_video_port=True):
        stream.seek(0)
        start_time = time.monotonic() #start_time declaration moved to give a more accurate measurement to calculate FPS
        image = Image.open(stream).convert('RGB').resize(
            (input_width, input_height), Image.ANTIALIAS)
        #start_time = time.monotonic()
        results = detect_objects(interpreter, image, args.threshold)
        #elapsed_ms = (time.monotonic() - start_time) * 1000

        annotator.clear()
        annotate_objects(annotator, results, labels)
        elapsed_ms = (time.monotonic() - start_time) * 1000
        annotator.text([5, 0], '%.1fms' % (elapsed_ms))
        frame_rate = 1/ ((time.monotonic() - start_time))
        annotator.text([5, 15], '%.1f FPS' % (frame_rate))
        annotator.update()

        stream.seek(0)
        stream.truncate()

    finally:
      camera.stop_preview()
Example #5
def detect(arg_labels, arg_interpreter, arg_threshold, preview):
    labels = load_labels(arg_labels)
    interpreter = Interpreter(arg_interpreter, num_threads=2)
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        camera.rotation = 180

        if (preview):
            camera.start_preview()

        try:
            stream = io.BytesIO()

            if (preview):
                annotator = Annotator(camera)

            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):

                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)

                start_time = time.monotonic()
                results = detect_objects(interpreter, image, arg_threshold)
                elapsed_ms = (time.monotonic() - start_time) * 1000

                print("Elapsed time (ms): ", elapsed_ms)
                print_object_labels(results, labels)

                if (preview):
                    annotator.clear()
                    annotate_objects(annotator, results, labels)
                    annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                    annotator.update()

                stream.seek(0)
                stream.truncate()

        finally:

            if (preview):
                camera.stop_preview()
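Example #5 calls print_object_labels, which is not shown; a plausible sketch (a hypothetical helper that simply prints each detection's label and score) is:

def print_object_labels(results, labels):
    """Print the label and score of every detection (hypothetical sketch)."""
    for obj in results:
        print('%s: %.2f' % (labels[obj['class_id']], obj['score']))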
Example #6
def main(activate_status):
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
      '--model', help='File path of .tflite file.', required=False, default='/home/pi/Desktop/twilio_hack/detect.tflite')
  parser.add_argument(
      '--labels', help='File path of labels file.', required=False, default='/home/pi/Desktop/twilio_hack/coco_labels.txt')

  parser.add_argument(
      '--threshold',
      help='Score threshold for detected objects.',
      required=False,
      type=float,
      default=0.65)
  args = parser.parse_args()

  labels = load_labels(args.labels)
  interpreter = Interpreter(args.model)
  interpreter.allocate_tensors()
  _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']
  if(activate_status):
        with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=30) as camera:
            camera.start_preview()
            try:
              stream = io.BytesIO()
              annotator = Annotator(camera)
              for _ in camera.capture_continuous(
                  stream, format='jpeg', use_video_port=True):
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)
                start_time = time.monotonic()
                results = detect_objects(interpreter, image, args.threshold)
                elapsed_ms = (time.monotonic() - start_time) * 1000
                annotator.clear()
                annotate_objects(annotator, results, labels)
                #We'll send our message here as we don't want to lag the drawing of bounding boxes
                classes_id = [i['class_id'] for i in results if 'class_id' in i]
                if classes_id:
                    q.put(classes_id)
                annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                annotator.update()

                stream.seek(0)
                stream.truncate()

            finally:
              camera.stop_preview()
Example #7
def main():
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
      '--model', help='File path of .tflite file.', required=True)
  parser.add_argument(
      '--labels', help='File path of labels file.', required=True)
  parser.add_argument(
      '--threshold',
      help='Score threshold for detected objects.',
      required=False,
      type=float,
      default=0.4)
  args = parser.parse_args()

  labels = load_labels(args.labels)
  interpreter = tf.lite.Interpreter(args.model)
  interpreter.allocate_tensors()
  _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']

  with picamera.PiCamera(
      resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=5) as camera:

    try:
      stream = io.BytesIO()
      annotator = Annotator(camera)
      stream.seek(0)
      image = Image.open("/home/pi/examples/lite/examples/object_detection/raspberry_pi/test14.jpg").convert('RGB').resize(
            (input_width, input_height), Image.ANTIALIAS)
      
      start_time = time.monotonic()
      results = detect_objects(interpreter, image, args.threshold)
      elapsed_ms = (time.monotonic() - start_time) * 1000

      annotator.clear()
      annotate_objects(annotator, results, labels)
      annotator.text([5, 0], '%.1fms' % (elapsed_ms))
      annotator.update()
      stream.seek(0)
      stream.truncate()

      image.show()
      time.sleep(30)

    finally:
      #camera.stop_preview()
      print("")
Example #8
def main():

    #change to s3 bucket files
    interpreter = Interpreter("detect.tflite")
    labels = load_labels("coco_labels.txt")
    threshold = 0.5

    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        camera.start_preview()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)

                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)
                start_time = time.monotonic()
                results = detect_objects(interpreter, image, threshold)
                #print (results, type(results))
                elapsed_ms = (time.monotonic() - start_time) * 1000

                annotator.clear()
                annotate_objects(annotator, results, labels)
                annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                annotator.update()

                stream.seek(0)
                stream.truncate()
                nbr = len(results)
                #print(f" Prediction result : {nbr}")
                payload = str(nbr)
                # print(payload)

        finally:
            camera.stop_preview()
    return payload
Example #9
def main():

    model_path = 'data/detect.tflite'
    labels_path = 'data/coco_labels.txt'
    threshold = 0.4

    labels = load_labels(labels_path)
    interpreter = Interpreter(model_path)

    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        camera.start_preview()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)
                start_time = time.monotonic()
                results = detect_objects(interpreter, image, threshold)
                elapsed_ms = (time.monotonic() - start_time) * 1000

                annotator.clear()
                annotate_objects(annotator, results, labels)
                annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                annotator.update()

                stream.seek(0)
                stream.truncate()

        finally:
            camera.stop_preview()
Example #10
def main():
    MODEL_PATH = BASE_DIR + '/pretrained_models/detect.tflite'
    LABEL_PATH = BASE_DIR + '/pretrained_models/coco_labels.txt'
    threshold = 0.4

    labels = load_labels(LABEL_PATH)
    interpreter = Interpreter(MODEL_PATH)
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']
    input_size = (input_width, input_height)

    camera = cv2.VideoCapture(index=0)
    camera.set(cv2.CAP_PROP_FPS, 3)
    annotator = Annotator(img_size=(640, 480))

    while (True):
        ret, in_img = camera.read()
        in_img = imutils.rotate_bound(in_img, angle=180)
        img = cv2.resize(in_img,
                         dsize=input_size,
                         interpolation=cv2.INTER_NEAREST)

        start_time = time.monotonic()
        results = detect_objects(interpreter, img, threshold)
        elapsed_ms = (time.monotonic() - start_time) * 1000

        annotator.clear()
        annotate_objects(annotator, results, labels)
        annotator.text([5, 0], '%.1fms' % (elapsed_ms))
        annotator.update(in_img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    camera.release()
    cv2.destroyAllWindows()
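Example #10 uses an OpenCV capture loop with an Annotator constructed as Annotator(img_size=...) and updated with annotator.update(in_img), so it cannot be the picamera-based class used elsewhere. A hypothetical OpenCV-backed stand-in with the same method names might look like:

import cv2


class Annotator:
    """Minimal OpenCV-based annotator (hypothetical sketch; the class used in
    Example #10 is not shown)."""

    def __init__(self, img_size=(640, 480)):
        self.img_size = img_size
        self.items = []  # queued draw commands: ('box', rect) or ('text', (xy, s))

    def clear(self):
        self.items = []

    def bounding_box(self, rect):
        self.items.append(('box', rect))

    def text(self, location, text):
        self.items.append(('text', (location, text)))

    def update(self, frame):
        for kind, payload in self.items:
            if kind == 'box':
                xmin, ymin, xmax, ymax = [int(v) for v in payload]
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
            else:
                (x, y), label = payload
                cv2.putText(frame, str(label), (int(x), int(y) + 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
        cv2.imshow('detections', frame)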
Example #11
def watch_background(detector:Detector):
    """Start image detection with preview"""
    data = Data()
    t = Thread(target=data.timer_thread)
    with PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=30) as camera:
        camera.vflip = False
        sleep(2)
        camera.exposure_mode='sports'
        camera.start_preview()
        sleep(2)
        t.start()
        stream = io.BytesIO()
        annotator = Annotator(camera, "green")
        while True:
            try:
                for _ in camera.capture_continuous(stream, format='jpeg',
                                                   resize=(detector.input_width, detector.input_height)):
                    stream.seek(0)
                    image = Image.open(stream).convert('RGB')
                    start_time = monotonic()
                    data.results = detector.detect_objects(image)
                    elapsed_ms = (monotonic() - start_time) * 1000

                    annotator.clear()
                    detector.annotate_objects(annotator, data.results)
                    annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                    annotator.text([540, 0], f"Person count: {len(data.results)}")
                    annotator.update()
                    stream.seek(0)
            except KeyboardInterrupt:
                break
            finally:
                data.flag = False
                t.join()
                camera.stop_preview()
                print("Quitting\n")
Example #12
def main():

    ifEdgeTPU_1_else_0 = 1

    labels = load_labels('coco_labels.txt')

    #get interpreter for face detection model
    if ifEdgeTPU_1_else_0 == 1:
        interpreter = Interpreter(
            model_path=
            'models/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite',
            experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    else:
        interpreter = Interpreter(
            model_path='models/ssd_mobilenet_v2_face_quant_postprocess.tflite')

    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    person_number = 1  # Change the number of the person you scan. It will create a new number for that person
    count_images_saved = 0

    if os.path.isdir('scanned_people') == False:
        os.mkdir('scanned_people')

    if os.path.isdir('scanned_people/' + str(person_number)) == False:
        os.mkdir('scanned_people/' + str(person_number))
        os.mkdir('scanned_people/' + str(person_number) + '/png')
        os.mkdir('scanned_people/' + str(person_number) + '/npy')
    else:
        shutil.rmtree('scanned_people/' + str(person_number))
        os.mkdir('scanned_people/' + str(person_number))
        os.mkdir('scanned_people/' + str(person_number) + '/png')
        os.mkdir('scanned_people/' + str(person_number) + '/npy')

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        camera.rotation = 270
        camera.start_preview()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                image_large = Image.open(stream)
                image = image_large.convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)
                start_time = time.monotonic()
                results = detect_objects(interpreter, image, 0.9)
                elapsed_ms = (time.monotonic() - start_time) * 1000
                #print(image.size)

                annotator.clear()
                annotate_objects(annotator, results, labels)
                annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                annotator.update()

                ymin, xmin, ymax, xmax, score = get_best_box_param(
                    results, CAMERA_WIDTH, CAMERA_HEIGHT)

                if score > 0.99:
                    #print(ymin, " ", xmin, " ", ymax, " ", xmax)
                    #print(image_large.size)
                    img = np.array(image_large)
                    #print("img: ", img.shape)
                    img_cut = img[ymin:ymax, xmin:xmax, :]
                    print(img_cut.shape)
                    img_cut = cv2.resize(
                        img_cut, dsize=(96, 96),
                        interpolation=cv2.INTER_CUBIC).astype('uint8')
                    img_cut_pil = Image.fromarray(img_cut)
                    img_cut_pil.save('scanned_people/' + str(person_number) +
                                     '/png/img_' + str(count_images_saved) +
                                     '.png')
                    np.save(
                        'scanned_people/' + str(person_number) + '/npy/img_' +
                        str(count_images_saved), img_cut)
                    count_images_saved = count_images_saved + 1

                stream.seek(0)
                stream.truncate()

        finally:
            camera.stop_preview()
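Examples #12 and #18 call get_best_box_param, which is not shown here; a sketch of what it presumably does (returning the pixel coordinates and score of the highest-scoring face detection, an assumption about the original) is:

def get_best_box_param(results, camera_width, camera_height):
    """Return (ymin, xmin, ymax, xmax, score) in pixels for the best detection
    (hypothetical sketch; returns zeros when nothing was detected)."""
    if not results:
        return 0, 0, 0, 0, 0.0
    best = max(results, key=lambda r: r['score'])
    ymin, xmin, ymax, xmax = best['bounding_box']
    return (int(ymin * camera_height), int(xmin * camera_width),
            int(ymax * camera_height), int(xmax * camera_width),
            float(best['score']))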
Example #13
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    parser.add_argument('--threshold',
                        help='Score threshold for detected objects.',
                        required=False,
                        type=float,
                        default=0.4)
    args = parser.parse_args()

    labels = load_labels(args.labels)
    interpreter = Interpreter(args.model)
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    stat = True
    count = 0
    distance = 10000.0

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        camera.start_preview()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)
                start_time = time.monotonic()
                results = detect_objects(interpreter, image, args.threshold)
                elapsed_ms = (time.monotonic() - start_time) * 1000

                person = False
                for result in results:
                    if result['class_id'] == 0.0:
                        person = True
                if person:
                    ser.write(bytes("2", "utf-8"))
                else:
                    ser.write(bytes("0", "utf-8"))

                if ser.in_waiting > 0:
                    msg = ser.readline()
                    msg = msg.decode("utf-8")
                    msg = msg.replace("\\n", "")
                    msg = msg.replace("\\r", "")
                    #msg = msg.replace("\\x","")
                    #print(msg)
                    try:
                        distance = float(msg)
                    except:
                        stat = stat

                    if distance < 15:
                        if stat:
                            count += 1
                            stat = False
                    else:
                        stat = True
                    print("Distance: ", distance, "Number of Push Ups: ",
                          count)

                annotator.clear()
                annotate_objects(annotator, results, labels)
                annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                annotator.update()

                stream.seek(0)
                stream.truncate()

        finally:
            camera.stop_preview()
Example #14
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    parser.add_argument('--threshold',
                        help='Score threshold for detected objects.',
                        required=False,
                        type=float,
                        default=0.4)
    args = parser.parse_args()

    labels = load_labels(args.labels)
    interpreter = Interpreter(
        args.model,
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])  #coral
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    # initialize variables to calculate FPS
    instantaneous_frame_rates = []

    # initialize variable for tracker use

    t = None
    test_time_all = []
    counter = 0

    test_start_time = time.monotonic()

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        camera.start_preview()  #alpha = 200
        start_time = time.monotonic()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)

            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):

                test_time = (time.monotonic() - test_start_time)
                test_time_all.append(test_time)
                print(
                    str(sum(test_time_all) / len(test_time_all)) + ", FPS: " +
                    str(1 / (sum(test_time_all) / len(test_time_all))))

                stream.seek(0)

                counter += 1

                image = Image.open(stream).convert('RGB')
                cv_img = np.asarray(image)

                annotator.clear()

                # if there are no trackers yet, first try to detect objects
                if t == None:
                    image = image.resize((input_width, input_height),
                                         Image.ANTIALIAS)
                    results = detect_objects(interpreter, image,
                                             args.threshold)

                    rects = get_rects(results)

                    for i in np.arange(0, len(results)):
                        #format bounding box coordinates
                        print("new tracker")
                        box = np.array(rects[i])
                        (startY, startX, endY, endX) = box.astype("int")
                        cv_rect = (startX, startY, endX - startX,
                                   endY - startY)

                        t = cv2.TrackerMOSSE_create()
                        t.init(cv_img, cv_rect)

                        annotator.bounding_box([startX, startY, endX, endY])

                    #annotate_objects(annotator, results, labels)

                else:

                    (success, box) = t.update(cv_img)

                    if success:
                        annotator.bounding_box(
                            [box[0], box[1], box[0] + box[2], box[1] + box[3]])
                        #cv2.rectangle(cv_img, (int(box[0]), int(box[1])), (int(box[0] + box[2]), int(box [1] + box[3])),(0, 255, 0), 2)

                    #if (counter % 40) == 0:
                    #t = None

                #elapsed_ms = (time.monotonic() - start_time) * 1000
                #annotator.text([5, 0], '%.1f ms' % (elapsed_ms))
                #frame_rate = 1/ ((time.monotonic() - start_time))
                #start_time = time.monotonic()
                #print(frame_rate)

                #calculate average FPS
                #instantaneous_frame_rates.append(frame_rate)
                #avg_frame_rate = sum(instantaneous_frame_rates)/len(instantaneous_frame_rates)
                #print("FPS: " + str(avg_frame_rate))
                #annotator.text([5, 15], '%.1f FPS' % (avg_frame_rate))

                #annotator.clear()
                annotator.update()

                stream.seek(0)
                stream.truncate()

                test_start_time = time.monotonic()

        finally:
            camera.stop_preview()
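The tracking examples (#14, #17, and #20) call get_rects to turn detection results into pixel-space boxes for the OpenCV and dlib trackers. A hedged sketch consistent with the (startY, startX, endY, endX) unpacking used in those examples is:

def get_rects(results):
    """Convert detections to (ymin, xmin, ymax, xmax) pixel boxes
    (hypothetical sketch; the original helper is not shown)."""
    rects = []
    for obj in results:
        ymin, xmin, ymax, xmax = obj['bounding_box']
        rects.append((int(ymin * CAMERA_HEIGHT), int(xmin * CAMERA_WIDTH),
                      int(ymax * CAMERA_HEIGHT), int(xmax * CAMERA_WIDTH)))
    return rects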
Example #15
def main():
  # initialize variables to calculate FPS
  instantaneous_frame_rates = []
  
  counter = 0
  t = None
  #win = dlib.image_window()

  with picamera.PiCamera(
      resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=25) as camera:
    camera.start_preview() #alpha = 200
    try:
      stream = io.BytesIO()
      annotator = Annotator(camera)
      
      for _ in camera.capture_continuous(
          stream, format='jpeg', use_video_port=True):
        stream.seek(0)
        
        start_time = time.monotonic() #start_time declaration moved to give a more accurate measurement to calculate FPS

        image = Image.open(stream).convert('RGB')
        dlib_img = np.asarray(image)
        
        annotator.clear()
        
        if t == None:
            t = dlib.correlation_tracker()
            dlib_rect = dlib.rectangle(0, 0, 100, 100)
            t.start_track(dlib_img, dlib_rect)
            
        else:
            t.update(dlib_img)
            pos = t.get_position()
            
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())
            
            x = (startX + endX) / 2
            y = (startY + endY) / 2
            
            annotator.centroid(x, y)
            #annotator.clear()  
            #annotator.bounding_box([startX, startY, endX, endY])
  
            
        elapsed_ms = (time.monotonic() - start_time) * 1000
        annotator.text([5, 0], '%.1f ms' % (elapsed_ms))
        frame_rate = 1/ ((time.monotonic() - start_time))
        
        #calculate average FPS
        instantaneous_frame_rates.append(frame_rate)
        avg_frame_rate = sum(instantaneous_frame_rates)/len(instantaneous_frame_rates)
        print("FPS: " + str(avg_frame_rate))
        annotator.text([5, 15], '%.1f FPS' % (avg_frame_rate))
        
        #annotator.clear()
        annotator.update() 
        
        stream.seek(0)
        stream.truncate()
        
        

    finally:
      camera.stop_preview()
Example #16
def main(lidar_data_queue):
    global perceptron_network

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    parser.add_argument('--threshold',
                        help='Score threshold for detected objects.',
                        required=False,
                        type=float,
                        default=0.6)
    args = parser.parse_args()

    labels = load_labels(args.labels)
    interpreter = Interpreter(
        args.model,
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']
    print(input_height, input_width)

    count = 0

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT)) as camera:
        camera.rotation = 180
        camera.start_preview()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                start_time = time.monotonic()
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)

                #                 start_time = time.monotonic()
                results = detect_objects(interpreter, image, args.threshold)
                #elapsed_ms = (time.monotonic() - start_time) * 1000

                annotator.clear()
                middle_xy = annotate_objects(annotator, results, labels)
                #annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                # annotator.update()

                if perceptron_network.person_detected == True:  # this only changes the first time a person is detected
                    data = []
                    if lidar_data_queue.empty() is False:
                        data = lidar_data_queue.get()
                    else:
                        data = [0]

                    if middle_xy[
                            0] != 0:  # or ((middle_xy[0] < 300 and middle_xy != 0) and perceptron_network.first_arc_turn == True) or (middle_xy[0] > 20 and perceptron_network.first_arc_turn == True):

                        perceptron_network.person_detected_queue.insert(
                            0, True)  # keep track of frames with person in it
                        perceptron_network.person_detected_queue.pop()

                        object_width = middle_xy[2]
                        print('object width: ', object_width)
                        if perceptron_network.first_arc_turn == True:
                            object_width = 80
                        distance_to_target = perceptron_network.getDistanceToPerson(
                            object_width)
                        print('distance = ', distance_to_target)
                        new_motor_speeds = perceptron_network.followTarget(
                            middle_xy, distance_to_target, data)
                        motorSpeedLeft(
                            1,
                            round(perceptron_network.motor_speed_total_left))
                        motorSpeedRight(
                            1,
                            round(perceptron_network.motor_speed_total_right))
                        print(
                            "Left motor: ",
                            round(perceptron_network.motor_speed_total_left),
                            " Right motor: ",
                            round(perceptron_network.motor_speed_total_right))
                        print('..........................................')

                    elif perceptron_network.first_arc_turn == True or (
                            perceptron_network.first_reflex_turn == True):
                        arc_motor_speeds = perceptron_network.makeArcTurn(
                            perceptron_network.reflex_avoiding_obstacle_dist +
                            400)
                        perceptron_network.motor_speed_total_left = arc_motor_speeds[
                            0]
                        perceptron_network.motor_speed_total_right = arc_motor_speeds[
                            1]
                        motorSpeedLeft(1, arc_motor_speeds[0])
                        motorSpeedRight(1, arc_motor_speeds[1])
                        print('second',
                              perceptron_network.motor_speed_total_left)
                    elif perceptron_network.first_arc_turn == True or (
                            perceptron_network.making_avoidance_turn == True):
                        arc_motor_speeds = perceptron_network.makeArcTurn(
                            perceptron_network.start_avoiding_obstacle_dist +
                            300)
                        perceptron_network.motor_speed_total_left = arc_motor_speeds[
                            0]
                        perceptron_network.motor_speed_total_right = arc_motor_speeds[
                            1]
                        motorSpeedLeft(1, arc_motor_speeds[0])
                        motorSpeedRight(1, arc_motor_speeds[1])
                        print('second',
                              perceptron_network.motor_speed_total_left)

                    else:
                        perceptron_network.person_detected_queue.insert(
                            0, False)
                        perceptron_network.person_detected_queue.pop()
                        # If all of the last 15 frames were without a person
                        if any(perceptron_network.person_detected_queue
                               ) == False:
                            perceptron_network.motor_speed_total_left = 0
                            perceptron_network.motor_speed_total_right = 0
                            perceptron_network.motor_speed_distance = 0
                            print("Locating target....")

                            perceptron_network.robot_is_stationary = True
                            if perceptron_network.side_left_person_last_detected == True:
                                motorSpeedLeft(0, 19)
                                motorSpeedRight(1, 19)
                            elif perceptron_network.side_left_person_last_detected == False:
                                motorSpeedLeft(1, 19)
                                motorSpeedRight(0, 19)

                        # For calibrating the focal length
    #                 focal = perceptron_network.getPercievedFocal(object_width, 2000, 500)
    #                 print('focal = ', focal)
                elapsed_ms = (time.monotonic() - start_time) * 1000
                annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                annotator.update()
                frame_times_for_coral_test.append(elapsed_ms)

                #print(perceptron_network.getPercievedFocal(object_height, distance_test, person_height))

                stream.seek(0)
                stream.truncate()
        except KeyboardInterrupt:
            print('Saving distance data and shutting down')
            motorSpeedLeft(1, 0)
            motorSpeedRight(1, 0)
            toggleLED({})

            frame_average = sum(frame_times_for_coral_test) / len(
                frame_times_for_coral_test)
            #perceptron_network.save_test1()
            #perceptron_network.save_test2()
            #perceptron_network.save_test3()
            #perceptron_network.save_test4()
            perceptron_network.saveWeights()


#        makePlots(perceptron_network.percep_l_wheel.weights_for_test, perceptron_network.percep_r_wheel.weights_for_test, perceptron_network.percep_far_distance.weights_for_test, perceptron_network.distances_for_test)

#         file = open('distances.csv', 'w')
#         file.truncate()
#         with file:
#             writer = csv.writer(file)
#             writer.writerow(perceptron_network.distances_for_test)
#             writer.writerow(perceptron_network.percep_l_wheel.weights_for_test)
#             writer.writerow(perceptron_network.percep_r_wheel.weights_for_test)
#             writer.writerow(perceptron_network.percep_distance.weights_for_test)

        finally:
            camera.stop_preview()
Example #17
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    parser.add_argument('--threshold',
                        help='Score threshold for detected objects.',
                        required=False,
                        type=float,
                        default=0.4)
    args = parser.parse_args()

    labels = load_labels(args.labels)
    interpreter = Interpreter(
        args.model,
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])  #coral
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    #initialize variables to calculate FPS
    instantaneous_frame_rates = []

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=10) as camera:
        camera.start_preview()  #alpha = 200
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                start_time = time.monotonic(
                )  #start_time declaration moved to give a more accurate measurement to calculate FPS
                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)
                results = detect_objects(interpreter, image, args.threshold)

                # get the coordinates for all bounding boxes within frame
                rects = get_rects(results)

                # return active objects from the centroid tracker
                objects = ct.update(rects)

                annotator.clear()
                annotate_objects(annotator, results, labels)

                for (objectID, centroid) in objects.items():

                    text = "ID {}".format(objectID)
                    annotator.text([centroid[0], centroid[1]], text)

                elapsed_ms = (time.monotonic() - start_time) * 1000
                annotator.text([5, 0], '%.1f ms' % (elapsed_ms))
                frame_rate = 1 / ((time.monotonic() - start_time))
                #annotator.text([5, 15], '%.1f FPS' % (frame_rate))
                #print('%.1f FPS' % (frame_rate))
                #annotator.update()

                #calculate average FPS
                instantaneous_frame_rates.append(frame_rate)
                avg_frame_rate = sum(instantaneous_frame_rates) / len(
                    instantaneous_frame_rates)
                print("FPS: " + str(avg_frame_rate))
                annotator.text([5, 15], '%.1f FPS' % (avg_frame_rate))

                annotator.update()

                stream.seek(0)
                stream.truncate()

        finally:
            camera.stop_preview()
Example #18
def main():

  ifEdgeTPU_1_else_0 = 1
  
  labels = load_labels('coco_labels.txt')
  people_labels = load_labels('people_labels.txt')
  
  #get interpreter for face detection model
  if ifEdgeTPU_1_else_0 == 1:
      interpreter = Interpreter(model_path = 'models/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite',
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
  else:
      interpreter = Interpreter(model_path = 'models/ssd_mobilenet_v2_face_quant_postprocess.tflite')
  
  interpreter.allocate_tensors()
  _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']
  
  #get interpreter for face embedding model
  if ifEdgeTPU_1_else_0 == 1:
      interpreter_emb = Interpreter(model_path = 'models/Mobilenet1_triplet1589223569_triplet_quant_edgetpu.tflite',
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
  else:
      interpreter_emb = Interpreter(model_path = 'models/Mobilenet1_triplet1589223569_triplet_quant.tflite')

  interpreter_emb.allocate_tensors()

  with picamera.PiCamera(
      resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=30) as camera:
      #resolution=(320, 320), framerate=30) as camera:
    camera.rotation=270
    camera.start_preview()
    try:
      stream = io.BytesIO()
      annotator = Annotator(camera)
      for _ in camera.capture_continuous(
          stream, format='jpeg', use_video_port=True):
        stream.seek(0)
        image_large = Image.open(stream)
        image = image_large.convert('RGB').resize(
            (input_width, input_height), Image.ANTIALIAS)
        start_time = time.monotonic()
        results = detect_objects(interpreter, image, 0.5)
        elapsed_ms = (time.monotonic() - start_time) * 1000
        #print(image.size)

        annotator.clear()
        annotate_objects(annotator, results, labels)
        annotator.text([5, 0], '%.1fms' % (elapsed_ms))
        annotator.update()
        
        ymin, xmin, ymax, xmax, score = get_best_box_param(results,CAMERA_WIDTH,CAMERA_HEIGHT)
        
        if score > 0.96:
            #print(ymin, " ", xmin, " ", ymax, " ", xmax)
            #print(image_large.size)
            img = np.array(image_large)
            #print("img: ", img.shape)
            #img = np.asarray(image_large).reshape(CAMERA_WIDTH,CAMERA_HEIGHT,3)
            #print(img.shape)
            #plt.imshow(img)
            #plt.show()
            img_cut = img[ymin:ymax,xmin:xmax,:]
            #print(img_cut.shape)
            img_cut = cv2.resize(img_cut, dsize=(96, 96), interpolation=cv2.INTER_CUBIC).astype('uint8')
            img_cut = img_cut.reshape(1,96,96,3)/255.
            #emb = FRmodel.predict(img_cut)
            emb = img_to_emb(interpreter_emb,img_cut)
            get_person_from_embedding(people_labels, emb)
            

        stream.seek(0)
        stream.truncate()

    finally:
      camera.stop_preview()
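Example #18 additionally relies on img_to_emb and get_person_from_embedding for face recognition; neither is shown. A sketch of img_to_emb, assuming it simply runs the embedding interpreter on the prepared (1, 96, 96, 3) crop, could be:

def img_to_emb(interpreter_emb, img_cut):
    """Run the face-embedding model on a (1, 96, 96, 3) crop and return the
    embedding vector (hypothetical sketch)."""
    input_details = interpreter_emb.get_input_details()[0]
    output_details = interpreter_emb.get_output_details()[0]
    # Cast to whatever dtype the (possibly quantized) model expects.
    interpreter_emb.set_tensor(input_details['index'],
                               img_cut.astype(input_details['dtype']))
    interpreter_emb.invoke()
    return interpreter_emb.get_tensor(output_details['index'])[0]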
Example #19
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    parser.add_argument('--threshold',
                        help='Score threshold for detected objects.',
                        required=False,
                        type=float,
                        default=0.4)
    args = parser.parse_args()

    labels = load_labels(args.labels)
    interpreter = tf.lite.Interpreter(args.model)
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    keys = {
        'tl': 4,
        'press': 9,
        'tr': 15,
        'x': 16,
        'up': 21,
        'right': 22,
        'left': 23,
        'start': 24,
        'select': 25,
        'y': 26,
        'down': 27,
        'b': 28,
        'a': 29
    }
    wiringpi.wiringPiSetup()

    for key in keys:
        wiringpi.pinMode(keys[key], wiringpi.GPIO.INPUT)
        wiringpi.pullUpDnControl(keys[key], wiringpi.GPIO.PUD_UP)

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        # The camera module is mounted upside down
        camera.rotation = 180
        camera.start_preview()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)
                start_time = time.monotonic()
                results = detect_objects(interpreter, image, args.threshold)
                elapsed_ms = (time.monotonic() - start_time) * 1000

                annotator.clear()
                annotate_objects(annotator, results, labels)
                annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                annotator.update()

                stream.seek(0)
                stream.truncate()

                if wiringpi.digitalRead(keys['start']) == 0:
                    print('press start to break')
                    break

        finally:
            camera.stop_preview()
Example #20
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    parser.add_argument('--threshold',
                        help='Score threshold for detected objects.',
                        required=False,
                        type=float,
                        default=0.4)
    args = parser.parse_args()

    labels = load_labels(args.labels)
    interpreter = Interpreter(
        args.model,
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])  #coral
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']

    # initialize variables to calculate FPS
    instantaneous_frame_rates = []

    # initialize variable for tracker use
    #trackers = []
    #j=0
    counter = 0
    t = None
    #win = dlib.image_window()
    test_start_time = time.monotonic()

    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=10) as camera:
        camera.start_preview()  #alpha = 200

        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)

            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                #start_time = time.monotonic()
                #print("Test FPS: " + str(1/(time.monotonic() - test_start_time)))
                #test_start_time = time.monotonic()
                stream.seek(0)

                counter += 1

                #start_time = time.monotonic() #start_time declaration moved to give a more accurate measurement to calculate FPS
                test_start_time = time.monotonic()
                image = Image.open(stream).convert('RGB')
                dlib_img = np.asarray(image)
                print("Test FPS: " + str(1 /
                                         (time.monotonic() - test_start_time)))
                #image.save("test_save.jpg")

                #image = Image.open(stream).convert('RGB').resize((input_width, input_height), Image.ANTIALIAS)
                #image = image.resize((input_width, input_height), Image.ANTIALIAS)

                #dlib_img = dlib.load_rgb_image("/home/pi/Desktop/object_detection/object_detection_tpu_tracking_dlib/test_save.jpg")

                annotator.clear()

                # if there are no trackers yet, first try to detect objects
                #if len(trackers) == 0:
                if counter < 10:
                    #dlib_img = np.asarray(image)

                    image = image.resize((input_width, input_height),
                                         Image.ANTIALIAS)
                    results = detect_objects(interpreter, image,
                                             args.threshold)

                    # get the coordinates for all bounding boxes within frame
                    rects = get_rects(results)

                    for i in np.arange(0, len(results)):
                        #format bounding box coordinates
                        box = np.array(rects[i])
                        (startY, startX, endY, endX) = box.astype("int")
                        print(startX, startY, endX, endY)

                        #x = (startX + endX) / 2
                        #y = (startY + endY) / 2

                        dlib_rect = dlib.rectangle(startX, startY, endX, endY)

                        t = dlib.correlation_tracker()
                        print("setting")
                        t.start_track(dlib_img, dlib_rect)

                        #trackers.append(t)

                    #annotator.clear()
                    #annotator.centroid(x, y)
                    annotate_objects(annotator, results, labels)

                else:

                    t.update(dlib_img)
                    pos = t.get_position()

                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    x = (startX + endX) / 2
                    y = (startY + endY) / 2

                    #annotator.centroid(x, y)
                    #annotator.clear()

                    annotator.bounding_box([startX, startY, endX, endY])

                    #if (counter % 20) == 0:
                    #t = None

                #annotator.clear()
                annotator.update()

                stream.seek(0)
                stream.truncate()

                #print(time.monotonic())

        finally:
            camera.stop_preview()
Example #21
def greengrass_app():
    try:
        client = greengrasssdk.client('iot-data')

        iot_topic = 'raspberry/out'
        client.publish(topic=iot_topic,
                       payload='Loading object detection model')
        DB = boto3.resource('dynamodb')
        table = DB.Table(__TableName__)

        interpreter.allocate_tensors()
        _, input_height, input_width, _ = interpreter.get_input_details(
        )[0]['shape']
        # Do inference until the lambda is killed.
        client.publish(topic=iot_topic,
                       payload='object detection model loaded')
        while True:
            with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                                   framerate=30) as camera:
                camera.start_preview()
                try:
                    stream = io.BytesIO()
                    annotator = Annotator(camera)
                    for _ in camera.capture_continuous(stream,
                                                       format='jpeg',
                                                       use_video_port=True):
                        stream.seek(0)

                        image = Image.open(stream).convert('RGB').resize(
                            (input_width, input_height), Image.ANTIALIAS)
                        start_time = time.monotonic()
                        results = detect_objects(interpreter, image, threshold)
                        #print (results, type(results))
                        elapsed_ms = (time.monotonic() - start_time) * 1000

                        annotator.clear()
                        annotate_objects(annotator, results, labels)
                        annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                        annotator.update()

                        stream.seek(0)
                        stream.truncate()
                        nbr = len(results)
                        #print(f" Prediction result : {nbr}")
                        p = str(nbr)
                        # valeur = int(nbr)
                        client.publish(
                            topic=iot_topic,
                            payload='The Number of persons detected :' + p +
                            '!!!')

                        # update Item on dynamodb
                        # this variable contain the number of person detected
                        valeur = int(nbr)
                        response = table.update_item(
                            Key={"id": 0},
                            UpdateExpression='SET #ts = :val1',
                            ExpressionAttributeValues={":val1": valeur},
                            ExpressionAttributeNames={"#ts": "value"})

                except Exception as e:
                    client.publish(topic=iot_topic,
                                   payload='Error inside Pica ')
    except Exception as e:
        client.publish(topic=iot_topic, payload='Error inside lambda ')
Example #22
def main():
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
      '--model', help='File path of .tflite file.', required=True)
  parser.add_argument(
      '--labels', help='File path of labels file.', required=True)
  parser.add_argument(
      '--threshold',
      help='Score threshold for detected objects.',
      required=False,
      type=float,
      default=0.4)
  args = parser.parse_args()

  labels = load_labels(args.labels)
  interpreter = Interpreter(args.model)
  interpreter.allocate_tensors()
  _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']

  with picamera.PiCamera(
      resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=30) as camera:
    camera.start_preview()
    try:
      stream = io.BytesIO()
      annotator = Annotator(camera)
      for _ in camera.capture_continuous(
          stream, format='jpeg', use_video_port=True):
        stream.seek(0)
        image = Image.open(stream).convert('RGB').resize(
            (input_width, input_height), Image.ANTIALIAS)
        start_time = time.monotonic()
        results = detect_objects(interpreter, image, args.threshold)
        elapsed_ms = (time.monotonic() - start_time) * 1000
        annotator.clear()
        annotate_objects(annotator, results, labels)
        
        try:
            print(results[0]['class_id'])
            if results[0]['class_id'] in [46.0, 16.0, 17.0]:
                if results[0]['class_id'] == 46.0:
                    print("Cup!!!!")
                    blink.found_cup()
                    
                if results[0]['class_id'] == 16.0:
                    print("Cat !!!!")
                    blink.found_cat()

                if results[0]['class_id'] == 17.0:
                    print("Dog !!!!")
                    blink.found_dog()

        except:
          print("")

        annotator.text([5, 0], '%.1fms' % (elapsed_ms))
        annotator.update()

        stream.seek(0)
        stream.truncate()

    finally:
      camera.stop_preview()
Example #23
def main():
    global location, card_R, input_state, direction_count
    clock = 0
    TurnOffTopMotor()
    TurnOffBotMotor()
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model',
                        help='File path of .tflite file.',
                        required=True)
    parser.add_argument('--labels',
                        help='File path of labels file.',
                        required=True)
    parser.add_argument('--threshold',
                        help='Score threshold for detected objects.',
                        required=False,
                        type=float,
                        default=0.4)
    args = parser.parse_args()

    labels = load_labels(args.labels)
    interpreter = Interpreter(args.model)
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details(
    )[0]['shape']
    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                           framerate=30) as camera:
        camera.start_preview()
        try:
            stream = io.BytesIO()
            annotator = Annotator(camera)
            for _ in camera.capture_continuous(stream,
                                               format='jpeg',
                                               use_video_port=True):
                if clock % 2 == 0:
                    TurnOnBotMotor()
                    time.sleep(0.052125)
                    TurnOffBotMotor()
                    direction_count += 1
                stream.seek(0)
                image = Image.open(stream).convert('RGB').resize(
                    (input_width, input_height), Image.ANTIALIAS)
                start_time = time.monotonic()
                results = detect_objects(interpreter, image, args.threshold)

                elapsed_ms = (time.monotonic() - start_time) * 1000
                annotator.clear()
                annotate_objects(annotator, results, labels)
                annotator.text([5, 0], '%.1fms' % (elapsed_ms))
                annotator.update()
                stream.seek(0)
                stream.truncate()
                clock = clock + 1
                if location > 3:
                    location = 3
                    tmp = direction[0]
                    direction[0] = direction[1] - direction[0]
                    direction[1] = direction[2] - direction[1]
                    direction[2] = direction[3] - direction[2]
                    direction[3] = tmp + 0.834 - direction[3]
                    print(tmp, direction[0], direction[1], direction[2],
                          direction[3])
                    while True:
                        #input_state = GPIO.input(MONITOR_PIN)
                        if card_R > 0:
                            TurnOnBotMotor()
                            #m =sensor.get_magnet()
                            time.sleep(direction[location])
                            if card_R == 52:
                                time.sleep(tmp)
                                #if  (m[0] - position[location][0]) < 600 and (m[1] - position[location][1]) < 600:
                            TurnOffBotMotor()
                            time.sleep(0.5)
                            TurnOnTopMotor()
                            while GPIO.input(MONITOR_PIN) == 1:
                                print(tmp, direction[0], direction[1],
                                      direction[2], direction[3])
                            TurnOffTopMotor()
                            time.sleep(0.5)
                            card_R -= 1
                            location += 1
                            if location > 3:
                                location = 0
                        else:
                            break
        finally:
            camera.stop_preview()
            GPIO.cleanup()