def main():
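    """Detect objects on a live video stream with a quantized SSDLite
    MobileDet Edge TPU model and overlay the results as SVG."""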
    default_model_dir = 'model'
    default_model = 'ssdlite_mobiledet_quant_postprocess_edgetpu.tflite'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument(
        '--top_k',
        type=int,
        default=3,
        help='number of categories with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.5,
                        help='classifier score threshold')
    parser.add_argument('--videosrc',
                        help='Which video source to use. ',
                        default='/dev/video0')
    parser.add_argument('--videofmt',
                        help='Input video format.',
                        default='raw',
                        choices=['raw', 'h264', 'jpeg'])
    args = parser.parse_args()

    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()

    w, h, _ = common.input_image_size(interpreter)
    inference_size = (w, h)
    # Average fps over last 100 frames.
    fps_counter = common.avg_fps_counter(100)

    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        objs = get_output(interpreter, args.threshold, args.top_k)
        end_time = time.monotonic()
        text_lines = [
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        print(' '.join(text_lines))
        return generate_svg(src_size, inference_size, inference_box, objs,
                            text_lines)

    result = gstreamer.run_pipeline(user_callback,
                                    src_size=(640, 480),
                                    appsink_size=inference_size,
                                    videosrc=args.videosrc,
                                    videofmt=args.videofmt)
Example #2
def main():
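    """Classify a live video stream with a quantized MobileNet v2 Edge TPU
    model, printing the top results with inference time and FPS."""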
    default_model_dir = '../all_models'
    default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
    default_labels = 'imagenet_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels', help='label file path',
                        default=os.path.join(default_model_dir, default_labels))
    parser.add_argument('--top_k', type=int, default=3,
                        help='number of categories with highest score to display')
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='classifier score threshold')
    parser.add_argument('--videosrc', help='Which video source to use. ',
                        default='/dev/video0')
    parser.add_argument('--videofmt', help='Input video format.',
                        default='raw',
                        choices=['raw', 'h264', 'jpeg'])
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    w, h, _ = common.input_image_size(interpreter)
    inference_size = (w, h)
    # Average fps over last 30 frames.
    fps_counter = common.avg_fps_counter(30)

    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        results = get_output(interpreter, args.top_k, args.threshold)
        end_time = time.monotonic()
        text_lines = [
            ' ',
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        for result in results:
            text_lines.append('score={:.2f}: {}'.format(result.score, labels.get(result.id, result.id)))
        print(' '.join(text_lines))
        return generate_svg(src_size, text_lines)

    result = gstreamer.run_pipeline(user_callback,
                                    src_size=(640, 480),
                                    appsink_size=inference_size,
                                    videosrc=args.videosrc,
                                    videofmt=args.videofmt)
Example #3
def main():
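    """Classify a live video stream using the legacy edgetpu
    ClassificationEngine API instead of a raw TFLite interpreter."""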
    default_model_dir = "../all_models"
    default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
    default_labels = 'imagenet_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument('--top_k',
                        type=int,
                        default=3,
                        help='number of classes with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='class score threshold')
    args = parser.parse_args()

    print("Loading %s with %s labels." % (args.model, args.labels))
    engine = ClassificationEngine(args.model)
    labels = load_labels(args.labels)

    input_shape = engine.get_input_tensor_shape()
    inference_size = (input_shape[1], input_shape[2])

    # Average fps over last 30 frames.
    fps_counter = common.avg_fps_counter(30)

    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()
        results = engine.classify_with_input_tensor(input_tensor,
                                                    threshold=args.threshold,
                                                    top_k=args.top_k)
        end_time = time.monotonic()
        text_lines = [
            'Inference: %.2f ms' % ((end_time - start_time) * 1000),
            'FPS: %d fps' % (round(next(fps_counter))),
        ]
        for index, score in results:
            text_lines.append('score=%.2f: %s' % (score, labels[index]))
        print(' '.join(text_lines))
        return generate_svg(src_size, text_lines)

    result = gstreamer.run_pipeline(user_callback, appsink_size=inference_size)
Example #4
def main():
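    """Detect COCO objects on a live video stream with MobileNet SSD v2 and
    overlay the results as SVG."""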
    default_model_dir = '../all_models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels', help='label file path',
                        default=os.path.join(default_model_dir, default_labels))
    parser.add_argument('--top_k', type=int, default=3,
                        help='number of categories with highest score to display')
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='classifier score threshold')
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    w, h, _ = common.input_image_size(interpreter)
    inference_size = (w, h)
    # Average fps over last 30 frames.
    fps_counter = common.avg_fps_counter(30)

    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        objs = get_output(interpreter, args.threshold, args.top_k)
        end_time = time.monotonic()
        text_lines = [
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        print(' '.join(text_lines))
        return generate_svg(src_size, inference_size, inference_box, objs,
                            labels, text_lines)

    result = gstreamer.run_pipeline(user_callback, appsink_size=inference_size)
Example #5
def main():
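    """Detect screws on a live video stream with a custom SSDLite MobileDet
    model; the commented-out paths below record other models tried."""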
    # default_model_dir = '../all_models'
    # default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    # default_labels = 'coco_labels.txt'

    default_model_dir = '../cmpe297_model'
    # default_model = 'ssdlite_6C_SB_10K_mobiledet_screws.tflite'   #5 classes small BB
    # default_model = 'ssdlite_6C_SB_10K_mobiledet_screws_edgetpu.tflite' #5 classes small BB
    # default_model = 'ssdlite_6C_SB_25K_mobiledet_screws.tflite' #5 classes small BB
    default_model = 'ssdlite_6C_SB_25K_mobiledet_screws_edgetpu.tflite'  #5 classes small BB
    # default_model = 'ssdlite_6C_BB_10K_mobiledet_screws.tflite'  #5 classes big BB 1K
    # default_model = 'ssdlite_6C_BB_10K_mobiledet_screws_edgetpu.tflite'  #5 classes big BB 1K
    default_labels = 'ssdlite_mobiledet_screws_6c_labels.txt'

    # default_model = 'ssdlite_2C_BB_10K_mobiledet_screws.tflite'  #5 classes big BB 1K
    # default_model = 'ssdlite_2C_BB_10K_mobiledet_screws_edgetpu.tflite'  #5 classes big BB 1K
    # default_labels = 'ssdlite_mobiledet_screws_2c_labels.txt'

    # default_model_dir = '../cmpe297_model'
    # default_model = 'Sergio_v3_ssdlite_mobiledet_dog_vs_cat.tflite'
    # # default_model = 'Sergio_v3_sdlite_mobiledet_dog_vs_cat_edgetpu.tflite'
    # default_labels = 'cat_vs_doc_All.txt'

    # default_model = 'mobilenet_v2_1.0_224_quant_edgetpu_cmpe297.tflite'
    # # default_model = 'mobilenet_v2_1.0_224_quant_cmpe297.tflite'
    # default_labels = 'flower_labels_cmpe297.txt'

    # default_model = 'eager_mobilenet_v2_1.0_224_quant.tflite'  #no edgeTPU
    # default_model = 'eager_mobilenet_v2_1.0_224_quant_edgetpu.tflite'  #eager
    #
    # default_model = 'eager2_mobilenet_v2_1.0_224_quant.tflite'  #eager
    # default_model = 'eager2_mobilenet_v2_1.0_224_quant_edgetpu.tflite'  #eager
    # default_labels = 'duckylabels.txt'

    # default_model = 'quant_coco-tiny-v3-relu.tflite'
    # default_model = 'quant_coco-tiny-v3-relu_edgetpu.tflite'

    # default_model = 'ssdlite_mobiledet_dog_vs_cat_edgetpu.tflite'
    # default_labels = 'cat_vs_doc.txt'

    # default_model = 'cmpe297_ssdlite_mobiledet_dog.tflite'
    # default_model = 'cmpe297_ssdlite_mobiledet_dog_edgetpu.tflite'
    # default_model = 'cmpe297v2_ssdlite_mobiledet_dog_edgetpu.tflite'
    # default_labels = 'dogs_labels.txt'

    # default_model = 'ssdlite_mobiledet_dog_vs_cat_edgetpuAcha.tflite'
    # default_labels = 'cat_vs_doc_All.txt'

    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument(
        '--top_k',
        type=int,
        default=3,
        help='number of categories with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='classifier score threshold')
    parser.add_argument('--videosrc',
                        help='Which video source to use. ',
                        default='/dev/video0')
    parser.add_argument('--videofmt',
                        help='Input video format.',
                        default='raw',
                        choices=['raw', 'h264', 'jpeg'])
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    w, h, _ = common.input_image_size(interpreter)
    inference_size = (w, h)
    # Average fps over last 30 frames.
    fps_counter = common.avg_fps_counter(30)

    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        objs = get_output(interpreter, args.threshold, args.top_k)
        #   print(objs[0].bbox)
        end_time = time.monotonic()
        text_lines = [
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        print(' '.join(text_lines))
        return generate_svg(src_size, inference_size, inference_box, objs,
                            labels, text_lines)

    result = gstreamer.run_pipeline(user_callback,
                                    src_size=(640, 480),
                                    appsink_size=inference_size,
                                    videosrc=args.videosrc,
                                    videofmt=args.videofmt)
Example #6
def main():
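    """Two-stage pipeline: detect faces with one Edge TPU interpreter, crop
    each face, and classify its emotion with a second (FER) interpreter."""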
    default_model_dir = '../all_models'
    default_model = 'mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite'
    default_labels = 'fer_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument(
        '--top_k',
        type=int,
        default=1,
        help='number of categories with highest score to display')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='classifier score threshold')
    parser.add_argument('--videosrc',
                        help='Which video source to use. ',
                        default='/dev/video0')
    parser.add_argument('--videofmt',
                        help='Input video format.',
                        default='raw',
                        choices=['raw', 'h264', 'jpeg'])
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    face_interpreter = common.make_interpreter(
        os.path.join(default_model_dir, default_model))
    face_interpreter.allocate_tensors()
    # fer interpreter
    fer_interpreter = common.make_interpreter(args.model)
    fer_interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    w, h, _ = common.input_image_size(face_interpreter)
    inference_size = (w, h)
    # Average fps over last 30 frames.
    fps_counter = common.avg_fps_counter(30)

    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(face_interpreter, input_tensor)
        face_interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        objs = get_output(face_interpreter, args.threshold, args.top_k)
        # Get face detected part
        from PIL import Image
        im = Image.fromarray(common.input_tensor(face_interpreter))
        src_w, src_h = src_size
        inf_w, inf_h = inference_size
        results = []
        emo_objs = []
        for obj in objs:
            x0, y0, x1, y1 = list(obj.bbox)
            # Relative coordinates.
            x, y, w, h = x0, y0, x1 - x0, y1 - y0
            # Absolute coordinates, input tensor space.
            x, y, w, h = int(x * inf_w), int(y * inf_h), int(w * inf_w), int(
                h * inf_h)
            crop_rectangle = (x, y, x + w, y + h)
            # get face
            face = im.crop(crop_rectangle)
            face = np.array(face)
            # convert to grayscale
            #face = cv2.cvtColor(face, cv2.COLOR_RGB2GRAY)
            print(face.shape)
            face = cv2.resize(face, (224, 224))
            face = face.astype(np.uint8)
            #face /= float(face.max())
            face = np.reshape(face.flatten(), (224, 224, 3))
            # invoke fer interpreter
            common.set_input2(fer_interpreter, face)
            fer_interpreter.invoke()
            # process results
            results = get_emotion(fer_interpreter)
            if len(results) > 0:
                setattr(obj, "id", results[0].id)
                setattr(obj, "score", results[0].score)
                emo_objs.append(obj)
        objs = emo_objs
        end_time = time.monotonic()

        text_lines = []
        if len(objs) > 0:
            text_lines = [
                'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
                'FPS: {} fps'.format(round(next(fps_counter))),
            ]
            for result in results:
                text_lines.append('score={:.2f}: {}'.format(
                    result.score, labels.get(result.id, result.id)))
            #print(' '.join(text_lines))
        return generate_svg(src_size, inference_size, inference_box, objs,
                            labels, text_lines)

    result = gstreamer.run_pipeline(user_callback,
                                    src_size=(640, 480),
                                    appsink_size=inference_size,
                                    videosrc=args.videosrc,
                                    videofmt=args.videofmt)
Example #7
def main():
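    """Door-access demo: wait for a visitor via a VL53L0X range sensor, play
    audio prompts, take spoken answers through voice recognition, then
    classify an object shown to the camera to grant or deny access over GPIO."""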

    def user_callback(input_tensor, src_size, inference_box):
        global access
        global house
        global parcel
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        results = get_output(interpreter, args.top_k, args.threshold)
        end_time = time.monotonic()
        text_lines = [
            ' ',
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        for result in results:
            label = labels.get(result.id, result.id)
            text_lines.append('score={:.2f}: {}'.format(result.score, label))
            if house:
                if label == "tree frog, tree-frog":
                    access = 1
                    gpio6.write(True)
                    Gtk.main_quit()
                elif label in ("acoustic guitar", "jigsaw puzzle", "jellyfish",
                               "basketball", "soccer ball"):
                    access = 0
                    gpio73.write(True)
                    Gtk.main_quit()
            elif parcel:
                if label == "acoustic guitar":
                    access = 1
                    gpio7.write(True)
                    Gtk.main_quit()
                elif label in ("tree frog, tree-frog", "jigsaw puzzle", "jellyfish",
                               "basketball", "soccer ball"):
                    access = 0
                    gpio8.write(True)
                    Gtk.main_quit()

        print(' '.join(text_lines))
        return generate_svg(src_size, text_lines)

    while True:
        global access
        global answer
        global house
        global parcel
        
        gpio6.write(False)
        gpio73.write(False)
        gpio7.write(False)
        gpio8.write(False)

        motion.start_ranging(VL53L0X.VL53L0X_BETTER_ACCURACY_MODE)
        timing = motion.get_timing()
        if timing < 20000:
            timing = 20000
        distance = motion.get_distance()
        while distance > 500:
            distance = motion.get_distance()
            time.sleep(timing / 1000000.0)
        motion.stop_ranging()

        wave_obj = sa.WaveObject.from_wave_file("welcome.wav")
        play_obj = wave_obj.play()
        play_obj.wait_done()
        wave_obj = sa.WaveObject.from_wave_file("entry.wav")
        play_obj = wave_obj.play()
        play_obj.wait_done()
        
        # Voice Recognition
        parser = argparse.ArgumentParser()
        model.add_model_flags(parser)
        args = parser.parse_args()
        interpreter = model.make_interpreter(args.model_file)
        interpreter.allocate_tensors()
        mic = args.mic if args.mic is None else int(args.mic)
        model.classify_audio(mic, interpreter, 1,
                             labels_file="config/labels_gc2.raw.txt",
                             result_callback=print_results,
                             sample_rate_hz=int(args.sample_rate_hz),
                             num_frames_hop=int(args.num_frames_hop))
        if answer == 1:
            wave_obj = sa.WaveObject.from_wave_file("key.wav")
            play_obj = wave_obj.play()
            play_obj.wait_done()
            answer = 0
            house = True
            parcel = False
        elif answer == 2:
            wave_obj = sa.WaveObject.from_wave_file("package.wav")
            play_obj = wave_obj.play()
            play_obj.wait_done()
            answer = 0
            house = False
            # Voice Recognition
            model.classify_audio(mic, interpreter, 2,
                                 labels_file="config/labels_gc2.raw.txt",
                                 result_callback=print_results,
                                 sample_rate_hz=int(args.sample_rate_hz),
                                 num_frames_hop=int(args.num_frames_hop))
            if answer == 1:
                wave_obj = sa.WaveObject.from_wave_file("key.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
                answer = 0
                parcel = True
            elif answer == 2:
                wave_obj = sa.WaveObject.from_wave_file("goodday.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
                answer = 0
                parcel = False
        if house or parcel:
            default_model_dir = '../all_models'
            default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
            default_labels = 'imagenet_labels.txt'
            parser = argparse.ArgumentParser()
            parser.add_argument('--model', help='.tflite model path',
                                default=os.path.join(default_model_dir, default_model))
            parser.add_argument('--labels', help='label file path',
                                default=os.path.join(default_model_dir, default_labels))
            parser.add_argument('--top_k', type=int, default=3,
                                help='number of categories with highest score to display')
            parser.add_argument('--threshold', type=float, default=0.1,
                                help='classifier score threshold')
            parser.add_argument('--videosrc', help='Which video source to use. ',
                                default='/dev/video0')
            parser.add_argument('--videofmt', help='Input video format.',
                                default='raw',
                                choices=['raw', 'h264', 'jpeg'])
            args = parser.parse_args()

            print('Loading {} with {} labels.'.format(args.model, args.labels))
            interpreter = common.make_interpreter(args.model)
            interpreter.allocate_tensors()
            labels = load_labels(args.labels)

            w, h, _ = common.input_image_size(interpreter)
            inference_size = (w, h)
            # Average fps over last 30 frames.
            fps_counter = common.avg_fps_counter(30)
            result = gstreamer.run_pipeline(user_callback,
                                        src_size=(640, 480),
                                        appsink_size=inference_size,
                                        videosrc=args.videosrc,
                                        videofmt=args.videofmt)
            if access:
                if house:
                    wave_obj = sa.WaveObject.from_wave_file("stay.wav")
                elif parcel:
                    wave_obj = sa.WaveObject.from_wave_file("parcel.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
            else:
                wave_obj = sa.WaveObject.from_wave_file("denied.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
        
        time.sleep(3)
Example #8
def main():
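    """Door-access demo variant that handshakes with an ESP32 board over
    GPIO: voice prompts select house or parcel mode, then image
    classification grants or denies access."""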

    #///-----------------------------------------------------------\\\
    #//                    Scanning Image                           \\

    def user_callback(input_tensor, src_size, inference_box):
        global access
        global house
        global parcel
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        results = get_output(interpreter, args.top_k, args.threshold)
        end_time = time.monotonic()
        text_lines = [
            ' ',
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        for result in results:
            label = labels.get(result.id, result.id)
            text_lines.append('score={:.2f}: {}'.format(result.score, label))
            if gpio6.read():
                access = 2
                Gtk.main_quit()
            elif house:
                if label == "tree frog, tree-frog" and result.score > 0.3:
                    access = 1
                    Gtk.main_quit()
                elif (label in ("acoustic guitar", "jigsaw puzzle", "jellyfish",
                                "basketball", "soccer ball")
                      and result.score > 0.3):
                    access = 0
                    Gtk.main_quit()
            elif parcel:
                if label == "acoustic guitar" and result.score > 0.3:
                    access = 1
                    Gtk.main_quit()
                elif (label in ("tree frog, tree-frog", "jigsaw puzzle", "jellyfish",
                                "basketball", "soccer ball")
                      and result.score > 0.3):
                    access = 0
                    Gtk.main_quit()

        print(' '.join(text_lines))
        return generate_svg(src_size, text_lines)


    #\\                                                             //
    #\\\-----------------------------------------------------------///

    while True:
        global access
        global answer
        global house
        global parcel

        gpio7.write(True)
        gpio8.write(True)
        while not gpio6.read():  # Waiting for signal
            time.sleep(0.05)
        time.sleep(2)

        # Setting up voice recogniton
        parser = argparse.ArgumentParser()
        model.add_model_flags(parser)
        args = parser.parse_args()
        interpreter = model.make_interpreter(args.model_file)
        interpreter.allocate_tensors()
        mic = args.mic if args.mic is None else int(args.mic)
        model.classify_audio(
            mic,
            interpreter,
            1,  # Calling Listening Function
            labels_file="config/labels_gc2.raw.txt",
            result_callback=print_results,
            sample_rate_hz=int(args.sample_rate_hz),
            num_frames_hop=int(args.num_frames_hop))

        if answer == 3:  # Timed out
            answer = 0
            house = False
            parcel = False
        elif answer == 1:  # Yes
            gpio8.write(True)
            gpio7.write(False)
            while not gpio6.read():
                time.sleep(0.05)
            gpio7.write(True)
            answer = 0
            house = True
            parcel = False

        elif answer == 2:  # No
            gpio8.write(False)
            gpio7.write(False)
            while not gpio6.read():
                time.sleep(0.05)
            gpio7.write(True)
            answer = 0
            house = False
            time.sleep(1)
            model.classify_audio(
                mic,
                interpreter,
                2,  # Calling Listening Function
                labels_file="config/labels_gc2.raw.txt",
                result_callback=print_results,
                sample_rate_hz=int(args.sample_rate_hz),
                num_frames_hop=int(args.num_frames_hop))
            if answer == 3:  # Timed out
                answer = 0
                parcel = False
            elif answer == 1:  # Yes
                gpio8.write(True)
                gpio7.write(False)
                while not gpio6.read():
                    time.sleep(0.05)
                gpio7.write(True)
                answer = 0
                parcel = True
            elif answer == 2:  # No
                gpio8.write(False)
                gpio7.write(False)
                while not gpio6.read():
                    time.sleep(0.05)
                gpio7.write(True)
                answer = 0
                parcel = False
        if house or parcel:
            # Setting up image recogniton
            default_model_dir = '../all_models'
            default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
            default_labels = 'imagenet_labels.txt'
            parser = argparse.ArgumentParser()
            parser.add_argument('--model',
                                help='.tflite model path',
                                default=os.path.join(default_model_dir,
                                                     default_model))
            parser.add_argument('--labels',
                                help='label file path',
                                default=os.path.join(default_model_dir,
                                                     default_labels))
            parser.add_argument(
                '--top_k',
                type=int,
                default=3,
                help='number of categories with highest score to display')
            parser.add_argument('--threshold',
                                type=float,
                                default=0.1,
                                help='classifier score threshold')
            parser.add_argument('--videosrc',
                                help='Which video source to use. ',
                                default='/dev/video0')
            parser.add_argument('--videofmt',
                                help='Input video format.',
                                default='raw',
                                choices=['raw', 'h264', 'jpeg'])
            args = parser.parse_args()

            print('Loading {} with {} labels.'.format(args.model, args.labels))
            interpreter = common.make_interpreter(args.model)
            interpreter.allocate_tensors()
            labels = load_labels(args.labels)

            w, h, _ = common.input_image_size(interpreter)
            inference_size = (w, h)
            # Average fps over last 30 frames.
            fps_counter = common.avg_fps_counter(30)
            result = gstreamer.run_pipeline(
                user_callback,  # Calling Scanning Image Function
                src_size=(640, 480),
                appsink_size=inference_size,
                videosrc=args.videosrc,
                videofmt=args.videofmt)

            # Communication with ESP32 Board
            if access == 1:
                gpio8.write(True)
                gpio7.write(False)
                while not gpio6.read():
                    time.sleep(0.05)
                gpio7.write(True)
            elif access == 0:
                gpio8.write(False)
                gpio7.write(False)
                while not gpio6.read():
                    time.sleep(0.05)
                gpio7.write(True)

        time.sleep(2)
Example #9
def main():
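    """Two-stage hand pipeline: detect hands with one model, then resize the
    frame to 224x224 and classify it with a second model."""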

    # Flag to also show video
    show_display = True

    # Model path parameter
    detection_model_path = '/home/mendel/handdetection_ssdmobilenetv1.tflite'
    classification_model_path = '/home/mendel/handclassification_mobilenet_v2_1.0_224_quant_edgetpu.tflite'

    #####

    print('Loading {} for hand detection.'.format(detection_model_path))
    detection_interpreter = common.make_interpreter(detection_model_path)
    detection_interpreter.allocate_tensors()

    print('Loading {} for hand classification.'.format(
        classification_model_path))
    classification_interpreter = common.make_interpreter(
        classification_model_path)
    classification_interpreter.allocate_tensors()

    w, h, _ = common.input_image_size(detection_interpreter)
    inference_size = (w, h)
    print("Inference size {},{}".format(w, h))

    # Average fps over last 30 frames.
    fps_counter = common.avg_fps_counter(30)

    def user_callback(input_tensor, src_size, inference_box):
        nonlocal fps_counter
        start_time = time.monotonic()

        # Run hand detection
        common.set_input(detection_interpreter, input_tensor)
        detection_interpreter.invoke()
        detection_results = get_detection_output(detection_interpreter)

        # Resize the mapped frame and copy it into the classifier's input.
        buf = input_tensor
        _, map_info = buf.map(Gst.MapFlags.READ)
        np_input = np.ndarray(shape=(h, w, 3),
                              dtype=np.uint8,
                              buffer=map_info.data)
        pil_input = Image.fromarray(np_input)
        pil_input = pil_input.resize((224, 224), Image.NEAREST)
        np_input = np.asarray(pil_input)
        buf.unmap(map_info)  # Release the read mapping once the copy exists.
        common.input_tensor(classification_interpreter)[:, :] = np_input

        # Run hand classification
        classification_interpreter.invoke()
        classification_results = get_classification_output(
            classification_interpreter)

        end_time = time.monotonic()

        if show_display:
            return generate_svg(src_size, detection_results,
                                classification_results)
        return

    result = gstreamer.run_pipeline(user_callback,
                                    src_size=(640, 480),
                                    appsink_size=inference_size,
                                    show_display=show_display)
Example #10
def main():
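    """Detect objects and optionally track them with SORT; detections are
    packed as [xmin, ymin, xmax, ymax, score] rows for the tracker."""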
    default_model_dir = '../models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels', help='label file path',
                        default=os.path.join(default_model_dir, default_labels))
    parser.add_argument('--top_k', type=int, default=3,
                        help='number of categories with highest score to display')
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='classifier score threshold')
    parser.add_argument('--videosrc', help='Which video source to use. ',
                        default='/dev/video0')
    parser.add_argument('--videofmt', help='Input video format.',
                        default='raw',
                        choices=['raw', 'h264', 'jpeg'])
    parser.add_argument('--tracker', help='Name of the Object Tracker To be used.',
                        default=None,
                        choices=[None, 'sort'])
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    w, h, _ = common.input_image_size(interpreter)
    inference_size = (w, h)
    # Average fps over last 30 frames.
    fps_counter = common.avg_fps_counter(30)

    def user_callback(input_tensor, src_size, inference_box, mot_tracker):
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        objs = get_output(interpreter, args.threshold, args.top_k)
        end_time = time.monotonic()
        # Pack each detection as an [xmin, ymin, xmax, ymax, score] row,
        # the input format expected by the SORT tracker.
        detections = []
        for obj in objs:
            detections.append([obj.bbox.xmin, obj.bbox.ymin,
                               obj.bbox.xmax, obj.bbox.ymax, obj.score])
        detections = np.array(detections)
        trdata = []
        trackerFlag = False
        text_lines = []  # Defined up front so the return below never sees an unbound name.
        if detections.any():
            if mot_tracker is not None:
                trdata = mot_tracker.update(detections)
                trackerFlag = True
            text_lines = [
                'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
                'FPS: {} fps'.format(round(next(fps_counter))),
            ]
        if len(objs) != 0:
            return generate_svg(src_size, inference_size, inference_box, objs,
                                labels, text_lines, trdata, trackerFlag)

    result = gstreamer.run_pipeline(user_callback,
                                    src_size=(640, 480),
                                    appsink_size=inference_size,
                                    trackerName=args.tracker,
                                    videosrc=args.videosrc,
                                    videofmt=args.videofmt)