def spotter(self, args):
    engine = BasicEngine(args.model_file)

    # None selects the default microphone; otherwise use the given device index.
    mic = args.mic if args.mic is None else int(args.mic)
    model.classify_audio(mic,
                         engine,
                         labels_file="config/labels_gc2.raw.txt",
                         commands_file="config/commands_v2_snake.txt",
                         # "dectection_callback" is the spelling the
                         # keyword-spotter model API uses.
                         dectection_callback=self._controler.callback,
                         sample_rate_hz=int(args.sample_rate_hz),
                         num_frames_hop=int(args.num_frames_hop))
Example #2
def main():
    parser = argparse.ArgumentParser()
    model.add_model_flags(parser)
    args = parser.parse_args()
    engine = BasicEngine(args.model_file)
    mic = args.mic if args.mic is None else int(args.mic)
    model.classify_audio(mic,
                         engine,
                         labels_file="config/labels_gc2.raw.txt",
                         result_callback=print_results,
                         sample_rate_hz=int(args.sample_rate_hz),
                         num_frames_hop=int(args.num_frames_hop))
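
Several of these examples pass a print_results function as result_callback without showing it. A minimal sketch of such a callback, assuming model.py invokes it with the raw score array plus the loaded commands and labels (the exact signature belongs to the keyword-spotter's model.py and may differ):

import numpy as np

def print_results(result, commands, labels, top=3):
    """Hypothetical result_callback: print the top-scoring labels."""
    scores = np.asarray(result).flatten()
    for i in np.argsort(-scores)[:top]:
        print("%s: %.3f" % (labels[i], scores[i]))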
Example #3
def spotter(self, args):
    interpreter = model.make_interpreter(args.model_file)
    interpreter.allocate_tensors()

    mic = args.mic if args.mic is None else int(args.mic)
    model.classify_audio(mic,
                         interpreter,
                         labels_file="config/labels_gc2.raw.txt",
                         commands_file="config/commands_v2_snake.txt",
                         dectection_callback=self._controler.callback,
                         sample_rate_hz=int(args.sample_rate_hz),
                         num_frames_hop=int(args.num_frames_hop))
Example #4
def main():
    parser = argparse.ArgumentParser()
    model.add_model_flags(parser)
    args = parser.parse_args()
    interpreter = model.make_interpreter(args.model_file)
    interpreter.allocate_tensors()
    mic = args.mic if args.mic is None else int(args.mic)
    model.classify_audio(mic,
                         interpreter,
                         labels_file="config/labels_simple_audio.txt",
                         result_callback=print_results,
                         sample_rate_hz=int(args.sample_rate_hz),
                         num_frames_hop=int(args.num_frames_hop))
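
Examples #5, #8, and #9 below toggle gpio6, gpio7, gpio8, and gpio73 and read a global answer that none of the snippets define. A minimal sketch of the module-level setup they presumably rely on, assuming python-periphery on a Coral Dev Board (chip paths and line offsets are placeholders, not taken from the snippets):

from periphery import GPIO

# Hypothetical pin assignments; adjust to the actual wiring.
# Example #9 reads gpio6, so there it would be opened with "in" instead.
gpio6 = GPIO("/dev/gpiochip0", 6, "out")
gpio7 = GPIO("/dev/gpiochip0", 7, "out")
gpio8 = GPIO("/dev/gpiochip0", 8, "out")
gpio73 = GPIO("/dev/gpiochip2", 9, "out")

answer = 0  # set by the result callback when a command is recognized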
Example #5
def main():
  gpio6.write(False)
  gpio73.write(False)
  parser = argparse.ArgumentParser()
  model.add_model_flags(parser)
  args = parser.parse_args()
  interpreter = model.make_interpreter(args.model_file)
  interpreter.allocate_tensors()
  mic = args.mic if args.mic is None else int(args.mic)
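  # The extra positional argument (3) appears to be a parameter added to a
  # modified classify_audio; the callback sets the global `answer`.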
  model.classify_audio(mic, interpreter, 3,
                       labels_file="config/labels_gc2.raw.txt",
                       result_callback=print_results,
                       sample_rate_hz=int(args.sample_rate_hz),
                       num_frames_hop=int(args.num_frames_hop))
  if answer == 1:
    gpio6.write(True)
  elif answer == 2:
    gpio73.write(True)
Example #6
def main():
  isListening = True
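  # Restart the classifier whenever it crashes.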
  while True:
    try:
      parser = argparse.ArgumentParser()
      model.add_model_flags(parser)
      args = parser.parse_args()
      interpreter = model.make_interpreter(args.model_file)
      interpreter.allocate_tensors()
      mic = args.mic if args.mic is None else int(args.mic)
      model.classify_audio(mic, interpreter,
                           labels_file="config/labels_gc2.raw.txt",
                           result_callback=print_results,
                           sample_rate_hz=int(args.sample_rate_hz),
                           num_frames_hop=int(args.num_frames_hop))
    except Exception:
      logger.error("Crashed, restarting the model: " + str(sys.exc_info()[0]))
      sys.stdout.write("Crashed, trying to restart\n")
      sys.stdout.write("Unexpected error: " + str(sys.exc_info()[0]) + "\n")
Example #7
def main():
    parser = argparse.ArgumentParser()
    model.add_model_flags(parser)
    args = parser.parse_args()
    engine = BasicEngine(args.model_file)
    mic = args.mic if args.mic is None else int(args.mic)
    yt_control = YoutubeControl()
    sys.stdout.write("--------------------\n")
    sys.stdout.write("This script will control Youtube.\n")
    sys.stdout.write("Just ensure that focus is on the YouTube player.\n")
    sys.stdout.write("--------------------\n")

    model.classify_audio(mic,
                         engine,
                         labels_file="config/labels_gc2.raw.txt",
                         commands_file="config/commands_v2.txt",
                         dectection_callback=yt_control.run_command,
                         sample_rate_hz=int(args.sample_rate_hz),
                         num_frames_hop=int(args.num_frames_hop))
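
YoutubeControl is not shown in this snippet. A minimal sketch of what it might look like, assuming the detection callback receives the recognized command string and that keystrokes are injected with pyautogui (the class name comes from the snippet; everything else here is an assumption):

import pyautogui

class YoutubeControl:
    """Hypothetical controller mapping spoken commands to YouTube shortcuts."""
    KEYMAP = {"play": "k", "pause": "k", "mute": "m", "fullscreen": "f"}

    def run_command(self, command):
        key = self.KEYMAP.get(command)
        if key:
            pyautogui.press(key)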
Example #8
def main():

    def user_callback(input_tensor, src_size, inference_box):
        global access
        global house
        global parcel
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        results = get_output(interpreter, args.top_k, args.threshold)
        end_time = time.monotonic()
        text_lines = [
            ' ',
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        for result in results:
            label = labels.get(result.id, result.id)
            text_lines.append('score={:.2f}: {}'.format(result.score, label))
            if house:
                if label == "tree frog, tree-frog":
                    access = 1
                    gpio6.write(True)
                    Gtk.main_quit()
                elif label in ("acoustic guitar", "jigsaw puzzle", "jellyfish",
                               "basketball", "soccer ball"):
                    access = 0
                    gpio73.write(True)
                    Gtk.main_quit()
            elif parcel:
                if label == "acoustic guitar":
                    access = 1
                    gpio7.write(True)
                    Gtk.main_quit()
                elif label in ("tree frog, tree-frog", "jigsaw puzzle", "jellyfish",
                               "basketball", "soccer ball"):
                    access = 0
                    gpio8.write(True)
                    Gtk.main_quit()
                
        print(' '.join(text_lines))
        return generate_svg(src_size, text_lines)

    # Main loop: wait for a visitor at the distance sensor, play the audio
    # prompts, ask the house/parcel questions via keyword spotting, then run
    # the camera classifier and grant or deny access over the GPIO lines.
    while True:
        global access
        global answer
        global house
        global parcel
        
        gpio6.write(False)
        gpio73.write(False)
        gpio7.write(False)
        gpio8.write(False)

        # Block until the VL53L0X time-of-flight sensor sees something
        # closer than 500 mm.
        motion.start_ranging(VL53L0X.VL53L0X_BETTER_ACCURACY_MODE)
        timing = motion.get_timing()
        if timing < 20000:
            timing = 20000
        distance = motion.get_distance()
        while distance > 500:
            distance = motion.get_distance()
            time.sleep(timing / 1000000.0)
        motion.stop_ranging()

        wave_obj = sa.WaveObject.from_wave_file("welcome.wav")
        play_obj = wave_obj.play()
        play_obj.wait_done()
        wave_obj = sa.WaveObject.from_wave_file("entry.wav")
        play_obj = wave_obj.play()
        play_obj.wait_done()
        
        # Voice Recognition
        parser = argparse.ArgumentParser()
        model.add_model_flags(parser)
        args = parser.parse_args()
        interpreter = model.make_interpreter(args.model_file)
        interpreter.allocate_tensors()
        mic = args.mic if args.mic is None else int(args.mic)
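        # The extra positional argument (1 here, 2 in the second call) appears
        # to select which listening round a modified classify_audio runs; the
        # callback sets the global `answer`.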
        model.classify_audio(mic, interpreter, 1,
                             labels_file="config/labels_gc2.raw.txt",
                             result_callback=print_results,
                             sample_rate_hz=int(args.sample_rate_hz),
                             num_frames_hop=int(args.num_frames_hop))
        if answer == 1:
            wave_obj = sa.WaveObject.from_wave_file("key.wav")
            play_obj = wave_obj.play()
            play_obj.wait_done()
            answer = 0
            house = True
            parcel = False
        elif answer == 2:
            wave_obj = sa.WaveObject.from_wave_file("package.wav")
            play_obj = wave_obj.play()
            play_obj.wait_done()
            answer = 0
            house = False
            # Voice Recognition
            model.classify_audio(mic, interpreter, 2,
                                 labels_file="config/labels_gc2.raw.txt",
                                 result_callback=print_results,
                                 sample_rate_hz=int(args.sample_rate_hz),
                                 num_frames_hop=int(args.num_frames_hop))
            if answer == 1:
                wave_obj = sa.WaveObject.from_wave_file("key.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
                answer = 0
                parcel = True
            elif answer == 2:
                wave_obj = sa.WaveObject.from_wave_file("goodday.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
                answer = 0
                parcel = False
        if house or parcel:
            default_model_dir = '../all_models'
            default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
            default_labels = 'imagenet_labels.txt'
            parser = argparse.ArgumentParser()
            parser.add_argument('--model', help='.tflite model path',
                                default=os.path.join(default_model_dir,default_model))
            parser.add_argument('--labels', help='label file path',
                                default=os.path.join(default_model_dir, default_labels))
            parser.add_argument('--top_k', type=int, default=3,
                                help='number of categories with highest score to display')
            parser.add_argument('--threshold', type=float, default=0.1,
                                help='classifier score threshold')
            parser.add_argument('--videosrc', help='Which video source to use. ',
                                default='/dev/video0')
            parser.add_argument('--videofmt', help='Input video format.',
                                default='raw',
                                choices=['raw', 'h264', 'jpeg'])
            args = parser.parse_args()

            print('Loading {} with {} labels.'.format(args.model, args.labels))
            interpreter = common.make_interpreter(args.model)
            interpreter.allocate_tensors()
            labels = load_labels(args.labels)

            w, h, _  = common.input_image_size(interpreter)
            inference_size = (w, h)
            # Average fps over last 30 frames.
            fps_counter = common.avg_fps_counter(30)
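            # run_pipeline blocks until user_callback calls Gtk.main_quit(),
            # by which point the global `access` has been set.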
            result = gstreamer.run_pipeline(user_callback,
                                        src_size=(640, 480),
                                        appsink_size=inference_size,
                                        videosrc=args.videosrc,
                                        videofmt=args.videofmt)
            if access:
                if house:
                    wave_obj = sa.WaveObject.from_wave_file("stay.wav")
                elif parcel:
                    wave_obj = sa.WaveObject.from_wave_file("parcel.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
            else:
                wave_obj = sa.WaveObject.from_wave_file("denied.wav")
                play_obj = wave_obj.play()
                play_obj.wait_done()
        
        time.sleep(3)
Example #9
def main():

    #///-----------------------------------------------------------\\\
    #//                    Scanning Image                           \\

    def user_callback(input_tensor, src_size, inference_box):
        global access
        global house
        global parcel
        nonlocal fps_counter
        start_time = time.monotonic()
        common.set_input(interpreter, input_tensor)
        interpreter.invoke()
        # For larger input image sizes, use the edgetpu.classification.engine for better performance
        results = get_output(interpreter, args.top_k, args.threshold)
        end_time = time.monotonic()
        text_lines = [
            ' ',
            'Inference: {:.2f} ms'.format((end_time - start_time) * 1000),
            'FPS: {} fps'.format(round(next(fps_counter))),
        ]
        for result in results:
            label = labels.get(result.id, result.id)
            text_lines.append('score={:.2f}: {}'.format(result.score, label))
            if gpio6.read():
                access = 2
                Gtk.main_quit()
            elif house:
                if label == "tree frog, tree-frog" and result.score > 0.3:
                    access = 1
                    Gtk.main_quit()
                elif label in ("acoustic guitar", "jigsaw puzzle", "jellyfish",
                               "basketball", "soccer ball") and result.score > 0.3:
                    access = 0
                    Gtk.main_quit()
            elif parcel:
                if label == "acoustic guitar" and result.score > 0.3:
                    access = 1
                    Gtk.main_quit()
                elif label in ("tree frog, tree-frog", "jigsaw puzzle", "jellyfish",
                               "basketball", "soccer ball") and result.score > 0.3:
                    access = 0
                    Gtk.main_quit()

        print(' '.join(text_lines))
        return generate_svg(src_size, text_lines)


#\\                                                             //
#\\\-----------------------------------------------------------///

    while True:
        global access
        global answer
        global house
        global parcel

        # Handshake with the ESP32: gpio7/gpio8 carry the signal and gpio6 is
        # the acknowledgment line (see "Communication with ESP32 Board" below).
        gpio7.write(True)
        gpio8.write(True)
        while not gpio6.read():  # Waiting for signal
            time.sleep(0.05)
        time.sleep(2)

        # Setting up voice recognition
        parser = argparse.ArgumentParser()
        model.add_model_flags(parser)
        args = parser.parse_args()
        interpreter = model.make_interpreter(args.model_file)
        interpreter.allocate_tensors()
        mic = args.mic if args.mic is None else int(args.mic)
        model.classify_audio(
            mic,
            interpreter,
            1,  # Calling Listening Function
            labels_file="config/labels_gc2.raw.txt",
            result_callback=print_results,
            sample_rate_hz=int(args.sample_rate_hz),
            num_frames_hop=int(args.num_frames_hop))

        if answer == 3:  # Timed out
            answer = 0
            house = False
            parcel = False
        elif answer == 1:  # Yes
            gpio8.write(True)
            gpio7.write(False)
            while not gpio6.read():
                time.sleep(0.05)
            gpio7.write(True)
            answer = 0
            house = True
            parcel = False

        elif answer == 2:  # No
            gpio8.write(False)
            gpio7.write(False)
            while not gpio6.read():
                time.sleep(0.05)
            gpio7.write(True)
            answer = 0
            house = False
            time.sleep(1)
            model.classify_audio(
                mic,
                interpreter,
                2,  # Calling Listening Function
                labels_file="config/labels_gc2.raw.txt",
                result_callback=print_results,
                sample_rate_hz=int(args.sample_rate_hz),
                num_frames_hop=int(args.num_frames_hop))
            if answer == 3:  # Timed out
                answer = 0
                parcel = False
            elif answer == 1:  # Yes
                gpio8.write(True)
                gpio7.write(False)
                while not gpio6.read():
                    time.sleep(0.05)
                gpio7.write(True)
                answer = 0
                parcel = True
            elif answer == 2:  # No
                gpio8.write(False)
                gpio7.write(False)
                while not gpio6.read():
                    time.sleep(0.05)
                gpio7.write(True)
                answer = 0
                parcel = False
        if house or parcel:
            # Setting up image recognition
            default_model_dir = '../all_models'
            default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
            default_labels = 'imagenet_labels.txt'
            parser = argparse.ArgumentParser()
            parser.add_argument('--model',
                                help='.tflite model path',
                                default=os.path.join(default_model_dir,
                                                     default_model))
            parser.add_argument('--labels',
                                help='label file path',
                                default=os.path.join(default_model_dir,
                                                     default_labels))
            parser.add_argument(
                '--top_k',
                type=int,
                default=3,
                help='number of categories with highest score to display')
            parser.add_argument('--threshold',
                                type=float,
                                default=0.1,
                                help='classifier score threshold')
            parser.add_argument('--videosrc',
                                help='Which video source to use. ',
                                default='/dev/video0')
            parser.add_argument('--videofmt',
                                help='Input video format.',
                                default='raw',
                                choices=['raw', 'h264', 'jpeg'])
            args = parser.parse_args()

            print('Loading {} with {} labels.'.format(args.model, args.labels))
            interpreter = common.make_interpreter(args.model)
            interpreter.allocate_tensors()
            labels = load_labels(args.labels)

            w, h, _ = common.input_image_size(interpreter)
            inference_size = (w, h)
            # Average fps over last 30 frames.
            fps_counter = common.avg_fps_counter(30)
            result = gstreamer.run_pipeline(
                user_callback,  # Calling Scanning Image Function
                src_size=(640, 480),
                appsink_size=inference_size,
                videosrc=args.videosrc,
                videofmt=args.videofmt)

            # Communication with ESP32 Board
            if access == 1:
                gpio8.write(True)
                gpio7.write(False)
                while not gpio6.read():
                    time.sleep(0.05)
                gpio7.write(True)
            elif access == 0:
                gpio8.write(False)
                gpio7.write(False)
                while not gpio6.read():
                    time.sleep(0.05)
                gpio7.write(True)

        time.sleep(2)