def run(callback, use_appsrc=False):
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--mirror', help='flip video horizontally', action='store_true')
    parser.add_argument('--model', help='.tflite model path.', required=False)
    parser.add_argument('--res', help='Resolution', default='640x480',
                        choices=['480x360', '640x480', '1280x720'])
    parser.add_argument('--videosrc', help='Which video source to use', default='/dev/video0')
    parser.add_argument('--h264', help='Use video/x-h264 input', action='store_true')
    args = parser.parse_args()

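    # The %d placeholders are filled with the decoder model's input size that
    # matches the chosen capture resolution (e.g. 481x641 for 640x480).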
    default_model = 'models/posenet_mobilenet_v1_075_%d_%d_quant_decoder_edgetpu.tflite'
    if args.res == '480x360':
        src_size = (640, 480)
        appsink_size = (480, 360)
        model = args.model or default_model % (353, 481)
    elif args.res == '640x480':
        src_size = (640, 480)
        appsink_size = (640, 480)
        model = args.model or default_model % (481, 641)
    elif args.res == '1280x720':
        src_size = (1280, 720)
        appsink_size = (1280, 720)
        model = args.model or default_model % (721, 1281)

#    print('Loading model: ', model)
#    engine = PoseEngine(model, mirror=args.mirror)
    gstreamer.run_pipeline(callback,
                           src_size, appsink_size,
                           use_appsrc=use_appsrc, mirror=args.mirror,
                           videosrc=args.videosrc, h264input=args.h264)
Example #2
def run(inf_callback, render_callback):
    global t
    if t >= 3:
        return
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--mirror',
                        help='flip video horizontally',
                        action='store_true')
    parser.add_argument('--model', help='.tflite model path.', required=False)
    parser.add_argument('--res',
                        help='Resolution',
                        default='640x480',
                        choices=['480x360', '640x480', '1280x720'])
    parser.add_argument('--videosrc',
                        help='Which video source to use',
                        default='/dev/video0')
    parser.add_argument('--h264',
                        help='Use video/x-h264 input',
                        action='store_true')
    parser.add_argument('--jpeg',
                        help='Use image/jpeg input',
                        action='store_true')
    args = parser.parse_args()

    default_model = 'models/mobilenet/posenet_mobilenet_v1_075_%d_%d_quant_decoder_edgetpu.tflite'
    if args.res == '480x360':
        src_size = (480, 360)
        appsink_size = (480, 360)
        model = args.model or default_model % (353, 481)
    elif args.res == '640x480':
        src_size = (640, 480)
        appsink_size = (640, 480)
        model = args.model or default_model % (481, 641)
    elif args.res == '1280x720':
        src_size = (1280, 720)
        appsink_size = (1280, 720)
        model = args.model or default_model % (721, 1281)

    print('Loading model: ', model)
    engine = PoseEngine(model)
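    # get_input_tensor_shape() is (batch, height, width, channels); the pipeline
    # size is given as (width, height), hence the index swap below.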
    input_shape = engine.get_input_tensor_shape()
    inference_size = (input_shape[2], input_shape[1])

    gstreamer.run_pipeline(
        partial(inf_callback, engine),
        partial(render_callback, engine),
        src_size,
        inference_size,
        #    mirror=args.mirror,
        mirror=True,
        videosrc=args.videosrc,
        h264=args.h264,
        jpeg=args.jpeg)
Example #3
def main():
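    # Set of barcode payloads decoded so far.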
    found = set()
    decode = BarCodeDecoder()

    def user_callback(image, svg_canvas):
        for decoded in decode.decode(image):
            text = "{} - {} ({})".format(datetime.datetime.now(),
                                         decoded['data'], decoded['type'])
            print(text)
            if decoded['data'] not in found:
                found.add(decoded['data'])

    gstreamer.run_pipeline(user_callback)
Example #4
def run(inf_callback, render_callback):
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--mirror',
                        help='flip video horizontally',
                        action='store_true')
    parser.add_argument('--model', help='.tflite model path.', required=False)
    parser.add_argument('--res',
                        help='Resolution',
                        default='640x480',
                        choices=['480x360', '640x480', '1280x720'])
    parser.add_argument('--videosrc',
                        help='Which video source to use',
                        default='/dev/video0')
    parser.add_argument('--h264',
                        help='Use video/x-h264 input',
                        action='store_true')
    parser.add_argument('--jpeg',
                        help='Use image/jpeg input',
                        action='store_true')
    args = parser.parse_args()

    default_model = 'models/posenet_mobilenet_v1_075_%d_%d_quant.tflite'
    if args.res == '480x360':
        src_size = (640, 480)
        model = args.model or default_model % (353, 481)
    elif args.res == '640x480':
        src_size = (640, 480)
        model = args.model or default_model % (481, 641)
    elif args.res == '1280x720':
        src_size = (1280, 720)
        model = args.model or default_model % (721, 1281)

    print('Loading model: ', model)
    engine = PoseEngine(model,
                        offsetRefineStep=10,
                        scoreThreshold=0.8,
                        maxPoseDetections=5,
                        nmsRadius=30,
                        minPoseConfidence=0.15)
    input_shape = engine.get_input_tensor_shape()
    inference_size = (input_shape[2], input_shape[1])

    gstreamer.run_pipeline(partial(inf_callback, engine),
                           partial(render_callback, engine),
                           src_size,
                           inference_size,
                           mirror=args.mirror,
                           videosrc=args.videosrc,
                           h264=args.h264,
                           jpeg=args.jpeg)
Example #5
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--mirror',
                        help='flip video horizontally',
                        action='store_true')
    parser.add_argument('--model', help='.tflite model path.', required=False)
    parser.add_argument('--res',
                        help='Resolution',
                        default='640x480',
                        choices=['480x360', '640x480', '1280x720'])
    parser.add_argument('--videosrc',
                        help='Which video source to use',
                        default='/dev/video0')
    parser.add_argument('--videotgt',
                        help='Where to write the video to',
                        default='autovideosink')
    parser.add_argument('--anonymize',
                        help='Use anonymizer mode',
                        action='store_true')
    parser.add_argument('--jpg',
                        help='Use image/jpeg input',
                        action='store_true')
    args = parser.parse_args()

    default_model = 'models/posenet_mobilenet_v1_075_%d_%d_quant_decoder_edgetpu.tflite'
    if args.res == '480x360':
        src_size = (640, 480)
        appsink_size = (480, 360)
        model = args.model or default_model % (353, 481)
    elif args.res == '640x480':
        src_size = (640, 480)
        appsink_size = (640, 480)
        model = args.model or default_model % (481, 641)
    elif args.res == '1280x720':
        src_size = (1280, 720)
        appsink_size = (1280, 720)
        model = args.model or default_model % (721, 1281)

    print('Loading model: ', model)
    engine = PoseEngine(model, mirror=args.mirror)
    gstreamer.run_pipeline(Callback(engine, anonymize=args.anonymize),
                           src_size,
                           appsink_size,
                           use_appsrc=True,
                           mirror=args.mirror,
                           videosrc=args.videosrc,
                           jpginput=args.jpg,
                           videotgt=args.videotgt)
Example #6
def main():
    model = '../traffic_sign_detection/road_signs_quantized_v2_edgetpu.tflite'
    labels = '../traffic_sign_detection/road_sign_labels.txt'

    print("Loading %s with %s labels."%(model, labels))
    engine = DetectionEngine(model)
    labels = load_labels(labels)

    last_time = time.monotonic()
    def user_callback(image, svg_canvas):
      nonlocal last_time  # without this, the assignment below makes last_time local and breaks the FPS math
      start_time = time.monotonic()
      objs = engine.DetectWithImage(image, threshold=0.1, keep_aspect_ratio=True, relative_coord=True, top_k=5)
      end_time = time.monotonic()
      text_lines = [
          'Inference: %.2f ms' %((end_time - start_time) * 1000),
          'FPS: %.2f fps' %(1.0/(end_time - last_time)),
      ]
      #print(' '.join(text_lines))
      last_time = end_time
      generate_svg(svg_canvas, objs, labels, text_lines)
      #status = cv.imwrite('output_image.png', np.array(image))
      output_uart()
      angles = input_output(image)
      if (angles == False):
          print ("no angles found")
      else:
          #continue
          print (angles, traffic_sign)
    
    result = gstreamer.run_pipeline(user_callback)
Example #7
def run(callback, use_appsrc=False):
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--mirror",
                        help="flip video horizontally",
                        action="store_true")
    parser.add_argument("--model", help=".tflite model path.", required=False)
    parser.add_argument(
        "--res",
        help="Resolution",
        default="1280x720",
        choices=["480x360", "640x480", "1280x720"],
    )
    parser.add_argument("--videosrc",
                        help="Which video source to use",
                        default="/dev/video0")
    parser.add_argument("--h264",
                        help="Use video/x-h264 input",
                        action="store_true")
    args = parser.parse_args()

    default_model = "models/posenet_mobilenet_v1_075_%d_%d_quant_decoder_edgetpu.tflite"
    if args.res == "480x360":
        src_size = (640, 480)
        appsink_size = (480, 360)
        model = args.model or default_model % (353, 481)
    elif args.res == "640x480":
        src_size = (640, 480)
        appsink_size = (640, 480)
        model = args.model or default_model % (481, 641)
    elif args.res == "1280x720":
        src_size = (1280, 720)
        appsink_size = (1280, 720)
        model = args.model or default_model % (721, 1281)

    print("Loading model: ", model)
    engine = PoseEngine(model, mirror=args.mirror)
    gstreamer.run_pipeline(
        partial(callback, engine),
        src_size,
        appsink_size,
        use_appsrc=use_appsrc,
        mirror=args.mirror,
        videosrc=args.videosrc,
        h264input=args.h264,
    )
Example #8
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--mirror', help='flip video horizontally', action='store_true')
    parser.add_argument('--model', help='.tflite model path.', required=False)
    parser.add_argument('--width', help='Source width', default='640')
    parser.add_argument('--height', help='Source height', default='480')
    parser.add_argument('--videosrc', help='Which video source to use', default='/dev/video0')

    parser.add_argument('--anonymize', dest='anonymize', action='store_true', help='Use anonymizer mode [--noanonymize]')
    parser.add_argument('--noanonymize', dest='anonymize', action='store_false', help=argparse.SUPPRESS)
    parser.set_defaults(anonymize=False)

    parser.add_argument('--bodyparts', dest='bodyparts', action='store_true', help='Color by bodyparts [--nobodyparts]')
    parser.add_argument('--nobodyparts', dest='bodyparts', action='store_false', help=argparse.SUPPRESS)
    parser.set_defaults(bodyparts=True)

    parser.add_argument('--h264', help='Use video/x-h264 input', action='store_true')
    parser.add_argument('--jpeg', help='Use image/jpeg input', action='store_true')
    args = parser.parse_args()

    if args.h264 and args.jpeg:
        print('Error: both mutually exclusive options h264 and jpeg set')
        sys.exit(1)

    default_model = 'models/bodypix_mobilenet_v1_075_640_480_16_quant_edgetpu_decoder.tflite'
    model = args.model if args.model else default_model
    print('Model: {}'.format(model))

    engine = PoseEngine(model)
    inference_size = (engine.image_width, engine.image_height)
    print('Inference size: {}'.format(inference_size))

    src_size = (int(args.width), int(args.height))
    if args.videosrc.startswith('/dev/video'):
        print('Source size: {}'.format(src_size))

    gstreamer.run_pipeline(Callback(engine,
                                    anonymize=args.anonymize,
                                    bodyparts=args.bodyparts),
                           src_size, inference_size,
                           mirror=args.mirror,
                           videosrc=args.videosrc,
                           h264=args.h264,
                           jpeg=args.jpeg)
Example #9
def main():
    default_model_dir = 'model'
    default_model = 'ssdlite_mobiledet_quant_postprocess_edgetpu.tflite'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument(
        '--top_k',
        type=int,
        default=3,
        help='number of categories with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.5,
                        help='classifier score threshold')
    parser.add_argument('--videosrc',
                        help='Which video source to use. ',
                        default='/dev/video0')
    parser.add_argument('--videofmt',
                        help='Input video format.',
                        default='raw',
                        choices=['raw', 'h264', 'jpeg'])
    args = parser.parse_args()

    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()

    w, h, _ = common.input_image_size(interpreter)
    inference_size = (w, h)
    # Average fps over last 100 frames.
    fps_counter = common.avg_fps_counter(100)

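    # Runs on every frame: receives the prepared input tensor, the source frame
    # size, and the box the frame was scaled into for inference, and returns an
    # SVG overlay to draw on top of the video.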
    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        objs = get_output(interpreter, args.threshold, args.top_k)
        end_time = time.monotonic()
        text_lines = [
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        print(' '.join(text_lines))
        return generate_svg(src_size, inference_size, inference_box, objs,
                            text_lines)

    result = gstreamer.run_pipeline(user_callback,
                                    src_size=(640, 480),
                                    appsink_size=inference_size,
                                    videosrc=args.videosrc,
                                    videofmt=args.videofmt)
Example #10
def main():
    default_model_dir = '../all_models'
    default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
    default_labels = 'imagenet_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model path',
                        default=os.path.join(default_model_dir,default_model))
    parser.add_argument('--labels', help='label file path',
                        default=os.path.join(default_model_dir, default_labels))
    parser.add_argument('--top_k', type=int, default=3,
                        help='number of categories with highest score to display')
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='classifier score threshold')
    parser.add_argument('--videosrc', help='Which video source to use. ',
                        default='/dev/video0')
    parser.add_argument('--videofmt', help='Input video format.',
                        default='raw',
                        choices=['raw', 'h264', 'jpeg'])
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    w, h, _  = common.input_image_size(interpreter)
    inference_size = (w, h)
    # Average fps over last 30 frames.
    fps_counter = common.avg_fps_counter(30)

    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        results = get_output(interpreter, args.top_k, args.threshold)
        end_time = time.monotonic()
        text_lines = [
            ' ',
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        for result in results:
            text_lines.append('score={:.2f}: {}'.format(result.score, labels.get(result.id, result.id)))
        print(' '.join(text_lines))
        return generate_svg(src_size, text_lines)

    result = gstreamer.run_pipeline(user_callback,
                                    src_size=(640, 480),
                                    appsink_size=inference_size,
                                    videosrc=args.videosrc,
                                    videofmt=args.videofmt)
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--bucket', help='GCS bucket name')
    parser.add_argument('--path', help='GCS path prefix for uploading images')
    args = parser.parse_args()

    print('Press enter or button when you want to take a picture')

    input_monitor = InputMonitor(gpio_pin=8)

    def user_callback(image, svg_canvas):
        nonlocal input_monitor

        if input_monitor.is_key_pressed():
            upload.upload(args.bucket, args.path, image)

    input_monitor.daemon = True
    input_monitor.start()
    print('monitoring keyboard input...')

    gstreamer.run_pipeline(user_callback)
Example #12
def main():
    """Creates camera pipeline, and pushes pipeline through ClassificationEngine
    model. Logs results to user-defined storage. Runs either in training mode to
    gather images for custom model creation or in deterrent mode that sounds an
    'alarm' if a defined label is detected."""
    args = user_selections()
    print("Loading %s with %s labels." % (args.model, args.labels))
    engine = ClassificationEngine(args.model)
    labels = load_labels(args.labels)
    storage_dir = args.storage

    #Initialize logging file
    logging.basicConfig(filename='%s/results.log' % storage_dir,
                        format='%(asctime)s-%(message)s',
                        level=logging.DEBUG)

    last_time = time.monotonic()
    last_results = [('label', 0)]

    def user_callback(image, svg_canvas):
        nonlocal last_time
        nonlocal last_results
        start_time = time.monotonic()
        results = engine.classify_with_image(image,
                                             threshold=args.threshold,
                                             top_k=args.top_k)
        end_time = time.monotonic()
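        # Replace class indices with their human-readable labels.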
        results = [(labels[i], score) for i, score in results]

        if args.print:
            print_results(start_time, last_time, end_time, results)

        if args.training:
            if do_training(results, last_results, args.top_k):
                save_data(image, results, storage_dir)
        else:
            #Custom model mode:
            #The labels can be modified to detect/deter user-selected items
            if results[0][0] != 'background':
                save_data(image, results, storage_dir)
            if any(label == 'fox squirrel, eastern fox squirrel, Sciurus niger' for label, _ in results):
                if args.sound is not None:
                    playsound(args.sound)
                logging.info('Deterrent sounded')

        last_results = results
        last_time = end_time

    # Note: we don't use the framerate parameter because our USB cam doesn't like it
    result = gstreamer.run_pipeline(user_callback,
                                    device='/dev/video1',
                                    src_caps='video/x-raw,format=YUY2')
Example #13
def main():
    default_model_dir = '../all_models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model path',
                        default=os.path.join(default_model_dir,default_model))
    parser.add_argument('--labels', help='label file path',
                        default=os.path.join(default_model_dir, default_labels))
    parser.add_argument('--top_k', type=int, default=10,
                        help='number of classes with highest score to display')
    parser.add_argument('--threshold', type=float, default=0.3,
                        help='class score threshold')
    args = parser.parse_args()

    print("Loading %s with %s labels."%(args.model, args.labels))
    engine = DetectionEngine(args.model)
    labels = load_labels(args.labels)

    last_time = time.monotonic()
    def user_callback(image, svg_canvas):
      nonlocal last_time
      start_time = time.monotonic()
      objs = engine.DetectWithImage(image, threshold=args.threshold,
                                    keep_aspect_ratio=True, relative_coord=True,
                                    top_k=args.top_k)
      end_time = time.monotonic()
      text_lines = [
          'Inference: %.2f ms' %((end_time - start_time) * 1000),
          'FPS: %.2f fps' %(1.0/(end_time - last_time)),
      ]
      print(' '.join(text_lines))
      last_time = end_time
      
      # keep only relevant classes, e.g. person, car, truck, train, stop sign, traffic lights, etc.
      objs = [obj for obj in objs if obj.label_id in [0,1,2,3,5,6,7,9,12]]
      # non max suppression
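      # Greedy NMS: keep the highest-scoring box, then drop any remaining box
      # whose IoU with it is 0.1 or more.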
      objs.sort(key=lambda x: x.score)
      keep = []
      while objs:
          max_score_obj = objs.pop()
          keep.append(max_score_obj)
          tmp = []
          for obj in objs:
              if iou(obj.bounding_box.flatten().tolist(),max_score_obj.bounding_box.flatten().tolist()) < 0.1:
                  tmp.append(obj)
          objs = tmp
          
      generate_svg(svg_canvas, keep, labels, text_lines)

    result = gstreamer.run_pipeline(user_callback)
Example #14
def main():
    default_model_dir = '../models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument('--top_k',
                        type=int,
                        default=3,
                        help='number of classes with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.4,
                        help='class score threshold')
    args = parser.parse_args()

    # load the label file
    print("Loading %s with %s labels." % (args.model, args.labels))
    engine = DetectionEngine(args.model)
    labels = load_labels(args.labels)

    last_time = time.monotonic()

    def user_callback(image, svg_canvas):
        # get the inference time
        nonlocal last_time
        start_time = time.monotonic()
        # perform the image detection
        objs = engine.detect_with_image(image,
                                        threshold=args.threshold,
                                        keep_aspect_ratio=True,
                                        relative_coord=True,
                                        top_k=args.top_k)
        end_time = time.monotonic()
        text_lines = [
            'Inference: %.2f ms' % ((end_time - start_time) * 1000),
            'FPS: %.2f fps' % (1.0 / (end_time - last_time)),
        ]
        print(' '.join(text_lines))
        last_time = end_time
        # generates the image for viewing
        generate_svg(svg_canvas, objs, labels, text_lines)

    result = gstreamer.run_pipeline(user_callback)
Example #15
def main():
    default_model_dir = "../all_models"
    default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
    default_labels = 'imagenet_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument('--top_k',
                        type=int,
                        default=3,
                        help='number of classes with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='class score threshold')
    args = parser.parse_args()

    print("Loading %s with %s labels." % (args.model, args.labels))
    engine = ClassificationEngine(args.model)
    labels = load_labels(args.labels)

    input_shape = engine.get_input_tensor_shape()
    inference_size = (input_shape[1], input_shape[2])

    # Average fps over last 30 frames.
    fps_counter = common.avg_fps_counter(30)

    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()
        results = engine.classify_with_input_tensor(input_tensor,
                                                    threshold=args.threshold,
                                                    top_k=args.top_k)
        end_time = time.monotonic()
        text_lines = [
            'Inference: %.2f ms' % ((end_time - start_time) * 1000),
            'FPS: %d fps' % (round(next(fps_counter))),
        ]
        for index, score in results:
            text_lines.append('score=%.2f: %s' % (score, labels[index]))
        print(' '.join(text_lines))
        return generate_svg(src_size, text_lines)

    result = gstreamer.run_pipeline(user_callback, appsink_size=inference_size)
Example #16
def main():
    default_model_dir = "../all_models"
    default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
    default_labels = 'imagenet_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument('--top_k',
                        type=int,
                        default=3,
                        help='number of classes with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='class score threshold')
    args = parser.parse_args()

    print("Loading %s with %s labels." % (args.model, args.labels))
    engine = ClassificationEngine(args.model)
    labels = load_labels(args.labels)

    last_time = time.monotonic()

    def user_callback(image, svg_canvas):
        nonlocal last_time
        start_time = time.monotonic()
        results = engine.ClassifyWithImage(image,
                                           threshold=args.threshold,
                                           top_k=args.top_k)
        end_time = time.monotonic()
        text_lines = [
            'Inference: %.2f ms' % ((end_time - start_time) * 1000),
            'FPS: %.2f fps' % (1.0 / (end_time - last_time)),
        ]
        for index, score in results:
            text_lines.append('score=%.2f: %s' % (score, labels[index]))
        print(' '.join(text_lines))
        last_time = end_time
        generate_svg(svg_canvas, text_lines)

    result = gstreamer.run_pipeline(user_callback)
Example #17
def main(args):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='File path of Tflite model.',
                        default='models/mobilenet_quant_v1_224_headless_edgetpu.tflite')
    parser.add_argument('--testui', dest='testui', action='store_true',
                        help='Run test of UI. Ctrl-C to abort.')
    parser.add_argument('--keyboard', dest='keyboard', action='store_true',
                        help='Use keyboard input instead of GPIO buttons.')
    parser.add_argument('--method', dest='method',
                        help='method for transfer learning, support knn or imprinting',
                        default='knn',
                        choices=['knn', 'imprinting'])
    parser.add_argument('--outputmodel', help='File path of output Tflite model, only for imprinting method.',
                        default='output.tflite')
    parser.add_argument('--keepclasses', dest='keepclasses', action='store_true',
                        help='Whether to keep base model classes, only for imprinting method.')
    args = parser.parse_args()

    # The UI differs a little depending on the system because the GPIOs
    # are a little bit different.
    print('Initialize UI.')
    platform = detectPlatform()
    if args.keyboard:
      ui = UI_Keyboard()
    else:
      if platform == 'raspberry': ui = UI_Raspberry()
      elif platform == 'devboard': ui = UI_EdgeTpuDevBoard()
      else:
        print('No GPIOs detected - falling back to Keyboard input')
        ui = UI_Keyboard()

    ui.wiggleLEDs()
    if args.testui:
        ui.testButtons()
        return

    print('Initialize Model...')
    if args.method == 'knn':
      teachable = TeachableMachineKNN(args.model, ui)
    else:
      teachable = TeachableMachineImprinting(args.model, ui, args.outputmodel, args.keepclasses)

    print('Start Pipeline.')
    result = gstreamer.run_pipeline(teachable.classify)

    ui.wiggleLEDs(4)
Example #18
def main(args):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model',
        help='File path of Tflite model.',
        default='mobilenet_quant_v1_224_headless_edgetpu.tflite')
    parser.add_argument('--testui',
                        dest='testui',
                        action='store_true',
                        help='Run test of UI. Ctrl-C to abort.')
    parser.add_argument('--keyboard',
                        dest='keyboard',
                        action='store_true',
                        help='Use keyboard input instead of GPIO buttons.')
    args = parser.parse_args()

    # The UI differs a little depending on the system because the GPIOs
    # are a little bit different.
    print('Initialize UI.')
    platform = detectPlatform()
    if args.keyboard:
        ui = UI_Keyboard()
    else:
        if platform == 'raspberry': ui = UI_Raspberry()
        elif platform == 'devboard': ui = UI_EdgeTpuDevBoard()
        else:
            print('No GPIOs detected - falling back to Keyboard input')
            ui = UI_Keyboard()

    ui.wiggleLEDs()
    if args.testui:
        ui.testButtons()
        return

    print('Initialize Model...')
    teachable = TeachableMachine(args.model, ui)

    print('Start Pipeline.')
    result = gstreamer.run_pipeline(teachable.classify)

    ui.wiggleLEDs(4)
    teachable._servoFlag.set()
    time.sleep(1)
    teachable._servoFlag.clear()
Example #19
def main():
    default_model_dir = '../all_models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model path',
                        default=os.path.join(default_model_dir,default_model))
    parser.add_argument('--labels', help='label file path',
                        default=os.path.join(default_model_dir, default_labels))
    parser.add_argument('--top_k', type=int, default=3,
                        help='number of categories with highest score to display')
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='classifier score threshold')
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    w, h, _ = common.input_image_size(interpreter)
    inference_size = (w, h)
    # Average fps over last 30 frames.
    fps_counter  = common.avg_fps_counter(30)

    def user_callback(input_tensor, src_size, inference_box):
      nonlocal fps_counter
      start_time = time.monotonic()
      common.set_input(interpreter, input_tensor)
      interpreter.invoke()
      # For larger input image sizes, use the edgetpu.classification.engine for better performance
      objs = get_output(interpreter, args.threshold, args.top_k)
      end_time = time.monotonic()
      text_lines = [
          'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
          'FPS: {} fps'.format(round(next(fps_counter))),
      ]
      print(' '.join(text_lines))
      return generate_svg(src_size, inference_size, inference_box, objs, labels, text_lines)

    result = gstreamer.run_pipeline(user_callback, appsink_size=inference_size)
Example #20
def main(args):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='File path of Tflite model.')
    parser.add_argument('--testui',
                        dest='testui',
                        action='store_true',
                        help='Run test of UI. Ctrl-C to abort.')
    args = parser.parse_args()

    # The UI differs a little depending on the system because the GPIOs
    # are a little bit different.
    print('Initialize UI.')
    platform = detectPlatform()
    if platform == 'raspberry': ui = UI_Raspberry()
    else:
        raise ValueError('Unsupported platform: %s ' % platform +
                         'This Demo is for Raspberry Pi.')

    ui.wiggleLEDs()
    if args.testui:
        ui.testButtons()
        return

    print('Initialize Model...')
    teachable = TeachableMachine(args.model, ui)

    assert os.path.isfile(args.model)

    print('Start Pipeline.')

    def user_callback(img, overlay):
        return teachable.classify(img, overlay)

    # TODO(mtyka) Refactor this once official gstreamer.py is
    # programmable and supports rpi. Then get rid of our custom
    # gstreamer.py
    result = gstreamer.run_pipeline(user_callback, platform)

    ui.wiggleLEDs(4)
    return 0 if teachable.clean_shutdown else 1
Example #21
def main():
    # default_model_dir = '../all_models'
    # default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    # default_labels = 'coco_labels.txt'

    default_model_dir = '../cmpe297_model'
    # default_model = 'ssdlite_6C_SB_10K_mobiledet_screws.tflite'   #5 classes small BB
    # default_model = 'ssdlite_6C_SB_10K_mobiledet_screws_edgetpu.tflite' #5 classes small BB
    # default_model = 'ssdlite_6C_SB_25K_mobiledet_screws.tflite' #5 classes small BB
    default_model = 'ssdlite_6C_SB_25K_mobiledet_screws_edgetpu.tflite'  #5 classes small BB
    # default_model = 'ssdlite_6C_BB_10K_mobiledet_screws.tflite'  #5 classes big BB 1K
    # default_model = 'ssdlite_6C_BB_10K_mobiledet_screws_edgetpu.tflite'  #5 classes big BB 1K
    default_labels = 'ssdlite_mobiledet_screws_6c_labels.txt'

    # default_model = 'ssdlite_2C_BB_10K_mobiledet_screws.tflite'  #5 classes big BB 1K
    # default_model = 'ssdlite_2C_BB_10K_mobiledet_screws_edgetpu.tflite'  #5 classes big BB 1K
    # default_labels = 'ssdlite_mobiledet_screws_2c_labels.txt'

    # default_model_dir = '../cmpe297_model'
    # default_model = 'Sergio_v3_ssdlite_mobiledet_dog_vs_cat.tflite'
    # # default_model = 'Sergio_v3_sdlite_mobiledet_dog_vs_cat_edgetpu.tflite'
    # default_labels = 'cat_vs_doc_All.txt'

    # default_model = 'mobilenet_v2_1.0_224_quant_edgetpu_cmpe297.tflite'
    # # default_model = 'mobilenet_v2_1.0_224_quant_cmpe297.tflite'
    # default_labels = 'flower_labels_cmpe297.txt'

    # default_model = 'eager_mobilenet_v2_1.0_224_quant.tflite'  #no edgeTPU
    # default_model = 'eager_mobilenet_v2_1.0_224_quant_edgetpu.tflite'  #eager
    #
    # default_model = 'eager2_mobilenet_v2_1.0_224_quant.tflite'  #eager
    # default_model = 'eager2_mobilenet_v2_1.0_224_quant_edgetpu.tflite'  #eager
    # default_labels = 'duckylabels.txt'

    # default_model = 'quant_coco-tiny-v3-relu.tflite'
    # default_model = 'quant_coco-tiny-v3-relu_edgetpu.tflite'

    # default_model = 'ssdlite_mobiledet_dog_vs_cat_edgetpu.tflite'
    # default_labels = 'cat_vs_doc.txt'

    # default_model = 'cmpe297_ssdlite_mobiledet_dog.tflite'
    # default_model = 'cmpe297_ssdlite_mobiledet_dog_edgetpu.tflite'
    # default_model = 'cmpe297v2_ssdlite_mobiledet_dog_edgetpu.tflite'
    # default_labels = 'dogs_labels.txt'

    # default_model = 'ssdlite_mobiledet_dog_vs_cat_edgetpuAcha.tflite'
    # default_labels = 'cat_vs_doc_All.txt'

    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument(
        '--top_k',
        type=int,
        default=3,
        help='number of categories with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='classifier score threshold')
    parser.add_argument('--videosrc',
                        help='Which video source to use. ',
                        default='/dev/video0')
    parser.add_argument('--videofmt',
                        help='Input video format.',
                        default='raw',
                        choices=['raw', 'h264', 'jpeg'])
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    w, h, _ = common.input_image_size(interpreter)
    inference_size = (w, h)
    # Average fps over last 30 frames.
    fps_counter = common.avg_fps_counter(30)

    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        objs = get_output(interpreter, args.threshold, args.top_k)
        #   print(objs[0].bbox)
        end_time = time.monotonic()
        text_lines = [
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        print(' '.join(text_lines))
        return generate_svg(src_size, inference_size, inference_box, objs,
                            labels, text_lines)

    result = gstreamer.run_pipeline(user_callback,
                                    src_size=(640, 480),
                                    appsink_size=inference_size,
                                    videosrc=args.videosrc,
                                    videofmt=args.videofmt)
Example #22
def main():

    #///-----------------------------------------------------------\\\
    #//                    Scanning Image                           \\

    def user_callback(input_tensor, src_size, inference_box):
        global access
        global house
        global parcel
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        results = get_output(interpreter, args.top_k, args.threshold)
        end_time = time.monotonic()
        text_lines = [
            ' ',
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        for result in results:
            text_lines.append('score={:.2f}: {}'.format(
                result.score, labels.get(result.id, result.id)))
            if gpio6.read() == True:
                access = 2
                Gtk.main_quit()
            elif house:
                if labels.get(
                        result.id, result.id
                ) == "tree frog, tree-frog" and result.score > 0.3:
                    access = 1
                    Gtk.main_quit()
                elif (labels.get(result.id, result.id) == "acoustic guitar"
                      or labels.get(result.id, result.id) == "jigsaw puzzle"
                      or labels.get(result.id, result.id) == "jellyfish"
                      or labels.get(result.id, result.id) == "basketball"
                      or labels.get(result.id, result.id)
                      == "soccer ball") and result.score > 0.3:
                    access = 0
                    Gtk.main_quit()
            elif parcel:
                if labels.get(
                        result.id,
                        result.id) == "acoustic guitar" and result.score > 0.3:
                    access = 1
                    Gtk.main_quit()
                elif (labels.get(result.id,
                                 result.id) == "tree frog, tree-frog"
                      or labels.get(result.id, result.id) == "jigsaw puzzle"
                      or labels.get(result.id, result.id) == "jellyfish"
                      or labels.get(result.id, result.id) == "basketball"
                      or labels.get(result.id, result.id)
                      == "soccer ball") and result.score > 0.3:
                    access = 0
                    Gtk.main_quit()

        print(' '.join(text_lines))
        return generate_svg(src_size, text_lines)


    #\\                                                             //
    #\\\-----------------------------------------------------------///

    while (1):
        global access
        global answer
        global house
        global parcel

        gpio7.write(True)
        gpio8.write(True)
        while (gpio6.read() == False):  #  Waiting for signal
            time.sleep(0.05)
        time.sleep(2)

        # Setting up voice recogniton
        parser = argparse.ArgumentParser()
        model.add_model_flags(parser)
        args = parser.parse_args()
        interpreter = model.make_interpreter(args.model_file)
        interpreter.allocate_tensors()
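        # None selects the default microphone; otherwise --mic is a device index.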
        mic = args.mic if args.mic is None else int(args.mic)
        model.classify_audio(
            mic,
            interpreter,
            1,  # Calling Listening Function
            labels_file="config/labels_gc2.raw.txt",
            result_callback=print_results,
            sample_rate_hz=int(args.sample_rate_hz),
            num_frames_hop=int(args.num_frames_hop))

        if answer == 3:  # Timed out
            answer = 0
            house = False
            parcel = False
        elif answer == 1:  # Yes
            gpio8.write(True)
            gpio7.write(False)
            while (gpio6.read() == False):
                time.sleep(0.05)
            gpio7.write(True)
            answer = 0
            house = True
            parcel = False

        elif answer == 2:  # No
            gpio8.write(False)
            gpio7.write(False)
            while (gpio6.read() == False):
                time.sleep(0.05)
            gpio7.write(True)
            answer = 0
            house = False
            time.sleep(1)
            model.classify_audio(
                mic,
                interpreter,
                2,  # Calling Listening Function
                labels_file="config/labels_gc2.raw.txt",
                result_callback=print_results,
                sample_rate_hz=int(args.sample_rate_hz),
                num_frames_hop=int(args.num_frames_hop))
            if answer == 3:  # Timed out
                answer = 0
                parcel = False
            elif answer == 1:  # Yes
                gpio8.write(True)
                gpio7.write(False)
                while (gpio6.read() == False):
                    time.sleep(0.05)
                gpio7.write(True)
                answer = 0
                parcel = True
            elif answer == 2:  # No
                gpio8.write(False)
                gpio7.write(False)
                while (gpio6.read() == False):
                    time.sleep(0.05)
                gpio7.write(True)
                answer = 0
                parcel = False
        if house or parcel:
            # Setting up image recogniton
            default_model_dir = '../all_models'
            default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
            default_labels = 'imagenet_labels.txt'
            parser = argparse.ArgumentParser()
            parser.add_argument('--model',
                                help='.tflite model path',
                                default=os.path.join(default_model_dir,
                                                     default_model))
            parser.add_argument('--labels',
                                help='label file path',
                                default=os.path.join(default_model_dir,
                                                     default_labels))
            parser.add_argument(
                '--top_k',
                type=int,
                default=3,
                help='number of categories with highest score to display')
            parser.add_argument('--threshold',
                                type=float,
                                default=0.1,
                                help='classifier score threshold')
            parser.add_argument('--videosrc',
                                help='Which video source to use. ',
                                default='/dev/video0')
            parser.add_argument('--videofmt',
                                help='Input video format.',
                                default='raw',
                                choices=['raw', 'h264', 'jpeg'])
            args = parser.parse_args()

            print('Loading {} with {} labels.'.format(args.model, args.labels))
            interpreter = common.make_interpreter(args.model)
            interpreter.allocate_tensors()
            labels = load_labels(args.labels)

            w, h, _ = common.input_image_size(interpreter)
            inference_size = (w, h)
            # Average fps over last 30 frames.
            fps_counter = common.avg_fps_counter(30)
            result = gstreamer.run_pipeline(
                user_callback,  # Calling Scanning Image Function
                src_size=(640, 480),
                appsink_size=inference_size,
                videosrc=args.videosrc,
                videofmt=args.videofmt)

            # Communication with ESP32 Board
            if access == 1:
                gpio8.write(True)
                gpio7.write(False)
                while (gpio6.read() == False):
                    time.sleep(0.05)
                gpio7.write(True)
            elif access == 0:
                gpio8.write(False)
                gpio7.write(False)
                while (gpio6.read() == False):
                    time.sleep(0.05)
                gpio7.write(True)

        time.sleep(2)
Example #23
def main():
    default_model_dir = 'models'
    default_model = 'mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite'
    default_labels = 'face_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument('--top_k',
                        type=int,
                        default=5,
                        help='number of classes with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='class score threshold')

    args = parser.parse_args()

    print("Loading %s with %s labels." % (args.model, args.labels))
    engine = DetectionEngine(args.model)
    labels = load_labels(args.labels)

    last_time = time.monotonic()

    def user_callback(image, svg_canvas):
        nonlocal last_time
        start_time = time.monotonic()
        objs = engine.DetectWithImage(image,
                                      threshold=args.threshold,
                                      keep_aspect_ratio=True,
                                      relative_coord=True,
                                      top_k=args.top_k)
        end_time = time.monotonic()

        if objs:
            print()
            for obj in objs:
                if labels:
                    print(labels[obj.label_id], 'score = ', obj.score)
                else:
                    print('score = ', obj.score)
                [x1, y1, x2, y2] = obj.bounding_box.flatten().tolist()
                print(x1, y1, x2, y2)
                # calculate pixel coords
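                # relative_coord=True returns corners in [0, 1], so (x1 + x2) * 320
                # is the box center x in a 640x480 frame (and likewise for y).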
                pix_x = (x1 + x2) * 320  # 640/2 = 320
                pix_y = (y1 + y2) * 240  # 480/2 = 240
                # calculate angles with respect to center
                # TODO: an accurate parameter replacing 480 needs to be calculated
                yaw = math.atan((pix_x - 640./2) / 480) * \
                    1800 / math.pi + YAW_MID
                pitch = math.atan((pix_y - 480./2) / 480) * \
                    1800 / math.pi + PITCH_MID
                serial.yaw = yaw
                serial.pitch = pitch
        else:
            print('No object detected!')

        text_lines = [
            'Inference: %.2f ms' % ((end_time - start_time) * 1000),
            'FPS: %.2f fps' % (1.0 / (end_time - last_time)),
        ]
        print(' '.join(text_lines))
        last_time = end_time
        generate_svg(svg_canvas, objs, labels, text_lines)

    result = gstreamer.run_pipeline(user_callback)
Example #24
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model file')
    parser.add_argument('--labels', help='.txt label file')
    parser.add_argument('--threshold',
                        help='Class Score Threshold',
                        type=float,
                        default=0.7)
    parser.add_argument('--top_k', help='Class Top K', type=int, default=2)
    parser.add_argument('--manual',
                        help='Take a picture when key pressed',
                        action='store_true')
    args = parser.parse_args()

    engine = ClassificationEngine(args.model)
    labels = load_labels(args.labels)

    input_monitor = InputMonitor(gpio_pin=8)
    led = LED(gpio_r=6, gpio_g=7, gpio_b=None, invert=True)
    led.switch_off_all()
    light_duration = 3 if args.manual else 0.1

    last_time = time.monotonic()

    if args.manual:
        input_monitor.daemon = True
        input_monitor.start()

    def user_callback(image, svg_canvas):
        nonlocal last_time

        if args.manual:
            if not input_monitor.is_key_pressed():
                return

        start_time = time.monotonic()
        results = engine.ClassifyWithImage(image,
                                           threshold=0.1,
                                           top_k=args.top_k)
        end_time = time.monotonic()

        text_lines = [
            'Inference: %.2f ms' % ((end_time - start_time) * 1000),
            'FPS: %.2f fps' % (1.0 / (end_time - last_time)),
        ]

        if len(results) == 0:
            led.switch_off_all()
        else:
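            # Highest-scoring classification first.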
            results.sort(key=lambda result: result[1], reverse=True)
            for index, score in results:
                text_lines.append('score=%.2f: %s' % (score, labels[index]))

            top_label = labels[results[0][0]]
            top_score = results[0][1]
            if top_score >= args.threshold:
                if top_label == 'roadway_green':
                    led.switch_green(duration=light_duration)
                elif top_label == 'roadway_red':
                    led.switch_red(duration=light_duration)
                elif top_label == 'roadway_yellow':
                    led.switch_yellow(duration=light_duration)
                else:
                    led.switch_off_all()

        last_time = end_time
        print(' '.join(text_lines))
        generate_svg(svg_canvas, text_lines)

    gstreamer.run_pipeline(user_callback)
Example #25
def main():

    def user_callback(input_tensor, src_size, inference_box):
        global access
        global house
        global parcel
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        results = get_output(interpreter, args.top_k, args.threshold)
        end_time = time.monotonic()
        text_lines = [
            ' ',
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        for result in results:
            text_lines.append('score={:.2f}: {}'.format(result.score, labels.get(result.id, result.id)))
            if house:
                if labels.get(result.id, result.id) == "tree frog, tree-frog":
                    access = 1
                    gpio6.write(True)
                    Gtk.main_quit()
                elif labels.get(result.id, result.id) == "acoustic guitar" or labels.get(result.id, result.id) == "jigsaw puzzle" or labels.get(result.id, result.id) == "jellyfish" or labels.get(result.id, result.id) == "basketball" or labels.get(result.id, result.id) == "soccer ball":
                    access = 0
                    gpio73.write(True)
                    Gtk.main_quit()
            elif parcel:
                if labels.get(result.id, result.id) == "acoustic guitar": 
                    access = 1
                    gpio7.write(True)
                    Gtk.main_quit()
                elif labels.get(result.id, result.id) == "tree frog, tree-frog" or labels.get(result.id, result.id) == "jigsaw puzzle" or labels.get(result.id, result.id) == "jellyfish" or labels.get(result.id, result.id) == "basketball" or labels.get(result.id, result.id) == "soccer ball":
                    access = 0
                    gpio8.write(True)
                    Gtk.main_quit()
                
        print(' '.join(text_lines))
        return generate_svg(src_size, text_lines)

    while True:
        global access
        global answer
        global house
        global parcel
        
        gpio6.write(False)
        gpio73.write(False)
        gpio7.write(False)
        gpio8.write(False)

        motion.start_ranging(VL53L0X.VL53L0X_BETTER_ACCURACY_MODE)
        timing = motion.get_timing()
        if timing < 20000:
            timing = 20000
        distance = motion.get_distance()
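        # Poll until a visitor is closer than 500 mm (the VL53L0X library
        # reports distance in millimetres).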
        while distance > 500:
            distance = motion.get_distance()
            time.sleep(timing/1000000.00)
        motion.stop_ranging()

        wave_obj = sa.WaveObject.from_wave_file("welcome.wav")
        play_obj = wave_obj.play()
        play_obj.wait_done()
        wave_obj = sa.WaveObject.from_wave_file("entry.wav")
        play_obj = wave_obj.play()
        play_obj.wait_done()
        
        # Voice Recognition
        parser = argparse.ArgumentParser()
        model.add_model_flags(parser)
        args = parser.parse_args()
        interpreter = model.make_interpreter(args.model_file)
        interpreter.allocate_tensors()
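        # --mic left unset keeps mic as None, which presumably selects the
        # default microphone; otherwise it is treated as a numeric device index.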
        mic = args.mic if args.mic is None else int(args.mic)
        model.classify_audio(mic, interpreter, 1,
                     labels_file="config/labels_gc2.raw.txt",
                     result_callback=print_results,
                     sample_rate_hz=int(args.sample_rate_hz),
                     num_frames_hop=int(args.num_frames_hop))
        if answer == 1:
            wave_obj = sa.WaveObject.from_wave_file("key.wav")
            play_obj = wave_obj.play()
            play_obj.wait_done()
            answer = 0
            house = True
            parcel = False
        elif answer == 2:
            wave_obj = sa.WaveObject.from_wave_file("package.wav")
            play_obj = wave_obj.play()
            play_obj.wait_done()
            answer = 0
            house = False
            # Voice Recognition
            model.classify_audio(mic, interpreter, 2,
                        labels_file="config/labels_gc2.raw.txt",
                        result_callback=print_results,
                        sample_rate_hz=int(args.sample_rate_hz),
                        num_frames_hop=int(args.num_frames_hop))
            if answer == 1:
                wave_obj = sa.WaveObject.from_wave_file("key.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
                answer = 0
                parcel = True
            elif answer == 2:
                wave_obj = sa.WaveObject.from_wave_file("goodday.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
                answer = 0
                parcel = False
        if house or parcel:
            default_model_dir = '../all_models'
            default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
            default_labels = 'imagenet_labels.txt'
            parser = argparse.ArgumentParser()
            parser.add_argument('--model', help='.tflite model path',
                                default=os.path.join(default_model_dir,default_model))
            parser.add_argument('--labels', help='label file path',
                                default=os.path.join(default_model_dir, default_labels))
            parser.add_argument('--top_k', type=int, default=3,
                                help='number of categories with highest score to display')
            parser.add_argument('--threshold', type=float, default=0.1,
                                help='classifier score threshold')
            parser.add_argument('--videosrc', help='Which video source to use. ',
                                default='/dev/video0')
            parser.add_argument('--videofmt', help='Input video format.',
                                default='raw',
                                choices=['raw', 'h264', 'jpeg'])
            args = parser.parse_args()

            print('Loading {} with {} labels.'.format(args.model, args.labels))
            interpreter = common.make_interpreter(args.model)
            interpreter.allocate_tensors()
            labels = load_labels(args.labels)

            w, h, _  = common.input_image_size(interpreter)
            inference_size = (w, h)
            # Average fps over last 30 frames.
            fps_counter = common.avg_fps_counter(30)
            result = gstreamer.run_pipeline(user_callback,
                                        src_size=(640, 480),
                                        appsink_size=inference_size,
                                        videosrc=args.videosrc,
                                        videofmt=args.videofmt)
            if access:
                if house:
                    wave_obj = sa.WaveObject.from_wave_file("stay.wav")
                elif parcel:
                    wave_obj = sa.WaveObject.from_wave_file("parcel.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
            else:
                wave_obj = sa.WaveObject.from_wave_file("denied.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
        
        time.sleep(3)
Example #26
def main():
    default_model_dir = '../all_models'
    default_model = 'mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite'
    default_labels = 'fer_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument(
        '--top_k',
        type=int,
        default=1,
        help='number of categories with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='classifier score threshold')
    parser.add_argument('--videosrc',
                        help='Which video source to use. ',
                        default='/dev/video0')
    parser.add_argument('--videofmt',
                        help='Input video format.',
                        default='raw',
                        choices=['raw', 'h264', 'jpeg'])
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    face_interpreter = common.make_interpreter(
        os.path.join(default_model_dir, default_model))
    face_interpreter.allocate_tensors()
    # fer interpreter
    fer_interpreter = common.make_interpreter(args.model)
    fer_interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    w, h, _ = common.input_image_size(face_interpreter)
    inference_size = (w, h)
    # Average fps over last 30 frames.
    fps_counter = common.avg_fps_counter(30)
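    # Two-stage setup: the SSD face detector localizes faces, and each cropped
    # face is then classified by the FER (emotion) interpreter in the callback below.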

    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(face_interpreter, input_tensor)
        face_interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        objs = get_output(face_interpreter, args.threshold, args.top_k)
        # Get face detected part
        from PIL import Image
        im = Image.fromarray(common.input_tensor(face_interpreter))
        src_w, src_h = src_size
        inf_w, inf_h = inference_size
        results = []
        emo_objs = []
        for obj in objs:
            x0, y0, x1, y1 = list(obj.bbox)
            # Relative coordinates.
            x, y, w, h = x0, y0, x1 - x0, y1 - y0
            # Absolute coordinates, input tensor space.
            x, y, w, h = int(x * inf_w), int(y * inf_h), int(w * inf_w), int(
                h * inf_h)
            crop_rectangle = (x, y, x + w, y + h)
            # get face
            face = im.crop(crop_rectangle)
            face = np.array(face)
            # convert to grayscale
            #face = cv2.cvtColor(face, cv2.COLOR_RGB2GRAY)
            print(face.shape)
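            # Resize the crop to 224x224 RGB uint8, which appears to be the
            # input the quantized FER model expects (see the reshape below).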
            face = cv2.resize(face, (224, 224))
            face = face.astype(np.uint8)
            #face /= float(face.max())
            face = np.reshape(face.flatten(), (224, 224, 3))
            # invoke fer interpreter
            common.set_input2(fer_interpreter, face)
            fer_interpreter.invoke()
            # process results
            results = get_emotion(fer_interpreter)
            if len(results) > 0:
                setattr(obj, "id", results[0].id)
                setattr(obj, "score", results[0].score)
                emo_objs.append(obj)
        objs = emo_objs
        end_time = time.monotonic()

        text_lines = []
        if len(objs) > 0:
            text_lines = [
                'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
                'FPS: {} fps'.format(round(next(fps_counter))),
            ]
            for result in results:
                text_lines.append('score={:.2f}: {}'.format(
                    result.score, labels.get(result.id, result.id)))
            #print(' '.join(text_lines))
        return generate_svg(src_size, inference_size, inference_box, objs,
                            labels, text_lines)

    result = gstreamer.run_pipeline(user_callback,
                                    src_size=(640, 480),
                                    appsink_size=inference_size,
                                    videosrc=args.videosrc,
                                    videofmt=args.videofmt)
Example #27
def main():
    """Creates camera pipeline, and pushes pipeline through ClassificationEngine
    model. Logs results to user-defined storage. Runs either in training mode to
    gather images for custom model creation or in deterrent mode that sounds an
    'alarm' if a defined label is detected."""
    args = user_selections()
    print("Loading %s with %s labels." % (args.model, args.labels))
    engine = ClassificationEngine(args.model)
    labels = load_labels(args.labels)
    storage_dir = args.storage

    #Initialize logging files
    logging.basicConfig(filename='%s/results.log' % storage_dir,
                        format='%(asctime)s-%(message)s',
                        level=logging.DEBUG)

    last_time = time.monotonic()
    last_results = [('label', 0)]
    last_tweet = None

    def user_callback(image, svg_canvas):
        nonlocal last_time
        nonlocal last_results
        nonlocal last_tweet

        start_time = time.monotonic()
        results = engine.classify_with_image(image,
                                             threshold=args.threshold,
                                             top_k=args.top_k)
        end_time = time.monotonic()
        results = [(labels[i], score) for i, score in results]

        if args.print:
            print_results(start_time, last_time, end_time, results)

        if args.training:
            print("training mode")
            if do_training(results, last_results, args.top_k):
                save_data(image, results, storage_dir)
        else:
            print("looking for birds")
            # Custom model mode:
            # Save the images if the label is one of the targets and its probability is relatively high
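            # A tweet is additionally sent at most once every 300 seconds, and
            # only when the top score reaches 0.9 or higher.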
            if results[0][1] >= 0.8:
                filename = save_data(image, results, storage_dir)
                if (last_tweet is None) or ((time.time() - last_tweet > 300)
                                            and results[0][1] >= 0.9):
                    try:
                        #imageFile = take_a_picture(storage_dir)
                        status = "I'm %d percent sure this is a %s. #ai" % (
                            results[0][1] * 100, results[0][0])
                        logging.info('Trying to tweet : %s', status)
                        logging.info('Reading file %s', filename)
                        tweet(status, filename)
                        last_tweet = time.time()
                    except Exception:
                        logging.exception('Failed to send tweet')
                        last_tweet = None

        last_results = results
        last_time = end_time

    result = gstreamer.run_pipeline(user_callback)
Example #28
def main():
    default_model_dir = 'models'
    # default_model = '2019_05_13_whole(1)/output_tflite_graph_1557776948_edgetpu.tflite'
    # default_model = '2/output_tflite_graph_edgetpu.tflite'
    # default_model = '3/output_tflite_graph_edgetpu.tflite'
    default_model = '4/output_tflite_graph_edgetpu.tflite'
    # more to come...
    default_labels = 'armor_plate_labels.txt'
    # default_model = 'mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite'
    # default_labels = 'face_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', action="store_true", help='enable debug mode')
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument('--top_k',
                        type=int,
                        default=5,
                        help='number of classes with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='class score threshold')

    args = parser.parse_args()

    if args.d:
        global DEBUG
        DEBUG = True
        print("Debug mode is on!")
    print("Loading model:", default_model)
    print("Loading labels:", default_labels)
    engine = DetectionEngine(args.model)
    labels = load_labels(args.labels)

    last_time = time.monotonic()

    try:
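        # Probe the slcan CAN adapter on the first candidate serial port at
        # 1 Mbit/s; fall back to the second port if that fails.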
        dev = slcan.slcanBus(PORT[0], bitrate=1000000)
        dev.open()
        print('Connection found at port', PORT[0])
    except Exception:
        dev = slcan.slcanBus(PORT[1], bitrate=1000000)
        dev.open()
        print('Connection found at port', PORT[1])
    # finally:
    #     dev = None
    #     print('No connection found but still running')
    #     pass

    yaw = YAW_MID
    pitch = PITCH_MID

    def user_callback(image, svg_canvas):
        nonlocal last_time
        start_time = time.monotonic()
        objs = engine.DetectWithImage(image,
                                      threshold=args.threshold,
                                      keep_aspect_ratio=True,
                                      relative_coord=True,
                                      top_k=args.top_k)
        end_time = time.monotonic()

        obj = choose_obj(objs, start_time)
        if obj:
            # if labels:
            #     print(labels[obj.label_id], 'score = ', obj.score)
            # else:
            #     print('score = ', obj.score)
            [x1, y1, x2, y2] = obj.bounding_box.flatten().tolist()
            # calculate pixel coords
            pix_x = (x1 + x2) * X_PIXEL / 2  # 640/2 = 320
            pix_y = (y1 + y2) * Y_PIXEL / 2  # 480/2 = 240
            # calculate angles with respect to center
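            # atan(offset_from_center / CAMERA_PARAM) gives the angle in radians
            # (CAMERA_PARAM presumably being the focal length in pixels);
            # 1800 / pi converts radians to tenths of a degree before adding
            # the gimbal midpoint offsets.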
            yaw = math.atan((pix_x - X_PIXEL/2) / CAMERA_PARAM) * \
                1800 / math.pi + YAW_MID
            pitch = math.atan((pix_y - Y_PIXEL/2) / CAMERA_PARAM) * \
                1800 / math.pi + PITCH_MID
            sendMessage(dev, yaw, pitch)
        else:
            print('No object detected!')
            sendMessage(dev, DEFAULT_YAW, DEFAULT_PITCH)

        text_lines = [
            'Inference: %.2f ms' % ((end_time - start_time) * 1000),
            'SPF: %.2f ms' % ((end_time - last_time) * 1000),
            'FPS: %.2f fps' % (1.0 / (end_time - last_time)),
        ]
        print(' '.join(text_lines))
        last_time = end_time
        if DEBUG:
            generate_svg(svg_canvas, objs, labels, text_lines)

    result = gstreamer.run_pipeline(DEBUG, user_callback)
Example #29
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--mirror', help='flip video horizontally', action='store_true')
    parser.add_argument('--model', help='.tflite model path.', required=False)
    parser.add_argument('--res', help='Resolution', default='640x480',
                        choices=['480x360', '640x480', '1280x720'])
    parser.add_argument('--videosrc', help='Which video source to use', default='/dev/video0')
    parser.add_argument('--h264', help='Use video/x-h264 input', action='store_true')
    parser.add_argument('--jpeg', help='Use image/jpeg input', action='store_true')
    args = parser.parse_args()
    # change the model

    default_model = 'pose_detection.tflite'
    if args.res == '480x360':
        src_size = (640, 480)
        appsink_size = (480, 360)
        #model = args.model or default_model % (353, 481)
    elif args.res == '640x480':
        src_size = (640, 480)
        appsink_size = (640, 480)
        #model = args.model or default_model % (481, 641)
    elif args.res == '1280x720':
        src_size = (1280, 720)
        appsink_size = (1280, 720)
        #model = args.model or default_model % (721, 1281)
    model = default_model
    n = 0
    sum_process_time = 0
    sum_inference_time = 0
    ctr = 0
    fps_counter = avg_fps_counter(30)

    # Engine holds the CNN pose model; run_inference pushes one input tensor
    # through it (the engine itself is created just below and is resolved from
    # the enclosing scope when the pipeline invokes this callback).
    def run_inference(input_tensor):
        return engine.run_inference(input_tensor)

    print('Loading model: ', model)
    engine = PoseEngine(model)
    input_shape = engine.get_input_tensor_shape()
    inference_size = (input_shape[2], input_shape[1])

    # render_overlay uses the engine from the enclosing scope.
    def render_overlay(output, src_size, inference_box):
        nonlocal n, sum_process_time, sum_inference_time, fps_counter

        svg_canvas = svgwrite.Drawing('', size=src_size)
        start_time = time.monotonic()
        outputs, inference_time = engine.ParseOutput(output)
        end_time = time.monotonic()
        n += 1
        sum_process_time += 1000 * (end_time - start_time)
        sum_inference_time += inference_time

        avg_inference_time = sum_inference_time / n
        text_line = 'PoseNet: %.1fms (%.2f fps) TrueFPS: %.2f Nposes %d' % (
            avg_inference_time, 1000 / avg_inference_time, next(fps_counter), len(outputs)
        )

        shadow_text(svg_canvas, 10, 20, text_line)
        for pose in outputs:
            draw_pose(svg_canvas, pose, src_size, inference_box)
        return (svg_canvas.tostring(), False)
    pose = gstreamer.run_pipeline(run_inference, render_overlay,
                                  src_size, inference_size,
                                  mirror=args.mirror,
                                  videosrc=args.videosrc,
                                  h264=args.h264,
                                  jpeg=args.jpeg)
Example #30
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--project', help='GCP Project')
    parser.add_argument('--bucket', help='GCS bucket name')
    parser.add_argument('--path', help='GCS path prefix for uploading images')
    parser.add_argument('--region', help='GCP Region')
    parser.add_argument('--registry_id', help='IoT Core Registry ID')
    parser.add_argument('--device_id', help='IoT Core Device ID')
    parser.add_argument('--private_key', help='IoT Core Private Key File')
    parser.add_argument('--algorithm', help='IoT Core JWT Algorithm',
                        default='RS256')
    parser.add_argument('--ca_certs', help='IoT Core roots.pem',
                        default='roots.pem')
    parser.add_argument('--mqtt_host', help='IoT Core hostname',
                        default='mqtt.googleapis.com')
    parser.add_argument('--mqtt_port', help='IoT Core port',
                        type=int,
                        default=443)
    args = parser.parse_args()

    input_monitor = InputMonitor(gpio_pin=8)
    led = LED(gpio_r=6, gpio_g=7, gpio_b=None, invert=True)
    led.switch_off_all()
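    # Workflow: a camera frame is uploaded to Cloud Storage whenever the input
    # monitored on GPIO pin 8 is pressed; classification results arriving over
    # IoT Core MQTT then drive the traffic-light LED.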

    def user_callback(image, svg_canvas):
        nonlocal input_monitor

        if input_monitor.is_key_pressed():
            upload.upload(args.bucket, args.path, image)

    input_monitor.daemon = True
    input_monitor.start()
    print('monitoring keyboard input...')

    mqtt.setup_mqtt_client(
        args.project,
        args.registry_id,
        args.private_key,
        args.device_id,
        args.region,
        args.algorithm,
        args.ca_certs,
        args.mqtt_host,
        args.mqtt_port)

    def message_callback(payload):
        try:
            preds = json.loads(payload)
            preds.sort(key=lambda pred: pred['class_score'], reverse=True)
            top = preds[0]['class_name']
            if top == 'roadway_green':
                led.switch_green(duration=3)
            elif top == 'roadway_red':
                led.switch_red(duration=3)
            elif top == 'roadway_yellow':
                led.switch_yellow(duration=3)
            else:
                led.switch_off_all()
        except Exception as ex:
            print(ex)

    mqtt.add_message_callback(message_callback)
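    # The callback above expects a JSON list of predictions; a hypothetical
    # payload matching the fields it reads would look like:
    #   [{"class_name": "roadway_green", "class_score": 0.92}]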

    gstreamer.run_pipeline(user_callback)